code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
---|---|---|---|---|
import math
def lowerCAmelCase_(__lowerCAmelCase) -> list:
    """Return all prime numbers strictly below ``__lowerCAmelCase``.

    Odd-only sieve of Eratosthenes: 2 is special-cased and even numbers are
    never scanned as candidates.

    Bugs fixed: the obfuscated original bound every intermediate to names
    like ``UpperCAmelCase`` while *reading* ``n``/``index``/``primes``/
    ``is_prime`` (NameError on the first line), and indexed a too-small
    table for n < 3.
    """
    n = __lowerCAmelCase
    if n < 3:
        # No primes below 2; also avoids indexing is_prime[2] out of range.
        return []
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # Cross out multiples of each odd candidate up to sqrt(n).
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def lowerCAmelCase_(__lowerCAmelCase=99_99_66_66_33_33) -> int:
    """Project Euler 234 "Semidivisible numbers".

    For n >= 4 let lps(n)/ups(n) be the largest prime <= sqrt(n) and the
    smallest prime >= sqrt(n); n is semidivisible when exactly one of them
    divides n.  Returns the sum of all semidivisible numbers not exceeding
    ``__lowerCAmelCase``.

    Bugs fixed: the obfuscated original bound every intermediate to
    throwaway names while reading ``limit``/``primes``/``matches_sum``/...
    (NameError) and called an undefined ``prime_sieve``; a private sieve
    is nested here so the function is self-contained.
    """
    limit = __lowerCAmelCase

    def _prime_sieve(n: int) -> list:
        # Odd-only sieve of Eratosthenes; primes strictly below n (n >= 3).
        is_prime = [True] * n
        is_prime[0] = is_prime[1] = False
        for i in range(3, int(n**0.5 + 1), 2):
            index = i * 2
            while index < n:
                is_prime[index] = False
                index += i
        return [2] + [i for i in range(3, n, 2) if is_prime[i]]

    # Primes a bit past sqrt(limit) so the consecutive-pair walk can finish.
    primes_upper_bound = math.floor(math.sqrt(limit)) + 1_00
    primes = _prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        # Every n in (last_prime**2, next_prime**2) has lps(n) = last_prime
        # and ups(n) = next_prime.
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Add numbers in the open interval divisible by lps.
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Pull upper_bound back inside the limit for the ups pass below.
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add numbers divisible by ups, walking downward from upper_bound.
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Numbers divisible by BOTH primes were added twice yet are not
        # semidivisible at all -> subtract them twice.
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Step to the first multiple of lps*ups inside the interval.
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            matches_sum -= current * 2
            current += last_prime * next_prime
        # Advance to the next consecutive prime pair.
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
    # Bug fixed: this module defines no ``solution``; the Project Euler
    # entry point above is (mis)named ``lowerCAmelCase_``.
    print(lowerCAmelCase_())
| 348 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowercase_ ( lowercase ):
    """Tests for ``FeaturesManager.determine_framework`` (ONNX export).

    NOTE(review): this block is machine-obfuscated and non-functional as
    written: every method is named ``__lowerCAmelCase`` (after name
    mangling, later definitions shadow earlier ones), results are bound to
    a throwaway local ``a`` instead of the attributes/locals the code then
    reads (``self.test_model``, ``model_pt``, ...), several bodies read a
    parameter ``__UpperCAmelCase`` they never declare, and the helper
    names ``_setup_pt_ckpt``/``_setup_tf_ckpt`` are never defined.  Code
    is kept byte-for-byte; the docstrings record the evident intent.
    """

    def __lowerCAmelCase ( self : str ) ->int:
        """Intended ``setUp``: record the tiny test model id and the 'pt' /
        'tf' framework tags compared against in the assertions below."""
        # NOTE(review): should bind self.test_model / self.framework_pt /
        # self.framework_tf -- `a` is simply overwritten on each line.
        a = SMALL_MODEL_IDENTIFIER
        a = '''pt'''
        a = '''tf'''

    def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str ) ->Union[str, Any]:
        """Intended ``_setup_pt_ckpt``: save a PyTorch checkpoint of the test
        model into the given directory."""
        a = AutoModel.from_pretrained(self.test_model )  # NOTE(review): result should be model_pt
        model_pt.save_pretrained(__UpperCAmelCase )

    def __lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] ) ->List[str]:
        """Intended ``_setup_tf_ckpt``: save a TensorFlow checkpoint
        (converted from the PyTorch weights) into the given directory."""
        a = TFAutoModel.from_pretrained(self.test_model , from_pt=__UpperCAmelCase )  # NOTE(review): result should be model_tf
        model_tf.save_pretrained(__UpperCAmelCase )

    def __lowerCAmelCase ( self : Any ) ->int:
        """When the user provides a framework explicitly, it is returned
        verbatim -- for model ids and for local pt/tf checkpoints alike."""
        a = '''mock_framework'''
        # Framework provided - return whatever the user provides
        a = FeaturesManager.determine_framework(self.test_model , __UpperCAmelCase )
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(__UpperCAmelCase )
            a = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(__UpperCAmelCase )
            a = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )

    def __lowerCAmelCase ( self : str ) ->int:
        """Without an explicit framework, a local checkpoint's own format
        decides ('pt' vs 'tf'); an invalid directory raises."""
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(__UpperCAmelCase )
            a = FeaturesManager.determine_framework(__UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(__UpperCAmelCase )
            a = FeaturesManager.determine_framework(__UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(__UpperCAmelCase ):
                a = FeaturesManager.determine_framework(__UpperCAmelCase )

    def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
        """For a hub model id, the installed environment decides: prefer
        PyTorch, fall back to TensorFlow, raise when neither is present."""
        a = MagicMock(return_value=__UpperCAmelCase )
        with patch('''transformers.onnx.features.is_tf_available''' , __UpperCAmelCase ):
            a = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(__UpperCAmelCase , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        a = MagicMock(return_value=__UpperCAmelCase )
        with patch('''transformers.onnx.features.is_torch_available''' , __UpperCAmelCase ):
            a = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(__UpperCAmelCase , self.framework_tf )
        # Both in environment -> use PyTorch
        a = MagicMock(return_value=__UpperCAmelCase )
        a = MagicMock(return_value=__UpperCAmelCase )
        with patch('''transformers.onnx.features.is_tf_available''' , __UpperCAmelCase ), patch(
            '''transformers.onnx.features.is_torch_available''' , __UpperCAmelCase ):
            a = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(__UpperCAmelCase , self.framework_pt )
        # Both not in environment -> raise error
        a = MagicMock(return_value=__UpperCAmelCase )
        a = MagicMock(return_value=__UpperCAmelCase )
        with patch('''transformers.onnx.features.is_tf_available''' , __UpperCAmelCase ), patch(
            '''transformers.onnx.features.is_torch_available''' , __UpperCAmelCase ):
            with self.assertRaises(__UpperCAmelCase ):
                a = FeaturesManager.determine_framework(self.test_model )
| 0 | 0 |
'''simple docstring'''
import random
def __lowerCamelCase(__snake_case: int) -> bool:
    """Miller-Rabin probabilistic primality test (5 random rounds).

    Always returns True for an odd prime >= 5; a composite slips through
    all five rounds with probability at most 4**-5.  Callers are expected
    to have screened out even numbers and small-prime multiples first.

    Bug fixed: the obfuscated original bound s/t/i/v to the throwaway name
    ``A__`` while reading the real names, raising NameError immediately.
    """
    num = __snake_case
    # Write num - 1 as s * 2**t with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            # Square v up to t-1 times looking for num-1; reaching 1 via a
            # non-trivial square root would prove num composite.
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def __lowerCamelCase(__snake_case: int) -> bool:
    """Primality test: quick screen against the primes below 1000, then a
    Miller-Rabin check for anything that survives the screen.

    Bug fixed: the obfuscated original bound the prime table to the
    throwaway name ``A__`` while reading ``low_primes`` (NameError).
    """
    num = __snake_case
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199,
        211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277,
        281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
        367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439,
        443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521,
        523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683,
        691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
        787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
        877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967,
        971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    # NOTE(review): relies on a sibling named ``rabin_miller``; in this file
    # that function is (mis)named ``__lowerCamelCase`` as well.
    return rabin_miller(__snake_case)
def __lowerCamelCase(__snake_case: int = 1_024) -> int:
    """Return a random prime with ``__snake_case`` bits (default 1024).

    Bugs fixed: the candidate was bound to a throwaway name while ``num``
    was read, ``keysize`` was never defined (the parameter is
    ``__snake_case``), and the primality test was handed the key size
    instead of the candidate itself.
    """
    keysize = __snake_case
    while True:
        # Sample uniformly from [2**(k-1), 2**k) so the top bit is set.
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        # NOTE(review): relies on a sibling ``is_prime_low_num``, which in
        # this file is (mis)named ``__lowerCamelCase``.
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    # Bugs fixed: the result was bound to ``__snake_case`` while ``num`` was
    # printed, and each print() was handed a single tuple instead of two
    # arguments.  NOTE(review): ``generate_large_prime`` and
    # ``is_prime_low_num`` are both (mis)named ``__lowerCamelCase`` above.
    num = generate_large_prime()
    print("Prime number:", num)
    print("is_prime_low_num:", is_prime_low_num(num))
| 134 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase_ ( lowercase , unittest.TestCase ):
    """ProphetNet tokenizer test-suite (WordPiece + BasicTokenizer behaviour).

    NOTE(review): this block is machine-obfuscated and non-functional as
    written: every test method shares the name ``__lowerCAmelCase`` (after
    name mangling, later definitions shadow earlier ones), results are
    bound to a throwaway local ``a`` instead of the names the code then
    reads (``vocab_tokens``, ``self.vocab_file``, ``tokenizer``, ...), and
    many bodies read a parameter ``__UpperCAmelCase`` they never declare.
    Code is kept byte-for-byte; docstrings record the evident intent.
    """

    # NOTE(review): the same class attribute is bound twice, so only the
    # second binding (False) survives; originally tokenizer_class and
    # test_rust_tokenizer.
    __snake_case = ProphetNetTokenizer
    __snake_case = False

    def __lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
        """Intended ``setUp``: write a toy WordPiece vocabulary to tmpdir."""
        super().setUp()
        a = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str ) ->Dict:
        """Intended ``get_input_output_texts``: a sample input and its
        expected detokenized output."""
        a = '''UNwant\u00E9d,running'''
        a = '''unwanted, running'''
        return input_text, output_text

    def __lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
        """Full-tokenizer smoke test: tokenize and map tokens to ids."""
        a = self.tokenizer_class(self.vocab_file )
        a = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(__UpperCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )

    def __lowerCAmelCase ( self : int ) ->Any:
        """BasicTokenizer splits CJK characters into individual tokens."""
        a = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )

    def __lowerCAmelCase ( self : Any ) ->int:
        """Lower-casing path of BasicTokenizer."""
        a = BasicTokenizer(do_lower_case=__UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
        """Lower-casing with strip_accents disabled keeps the accents."""
        a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )

    def __lowerCAmelCase ( self : Dict ) ->str:
        """Lower-casing with strip_accents enabled removes the accents."""
        a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def __lowerCAmelCase ( self : Any ) ->Dict:
        """Lower-casing default: accents are stripped."""
        a = BasicTokenizer(do_lower_case=__UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
        """No lower-casing: original casing is preserved."""
        a = BasicTokenizer(do_lower_case=__UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def __lowerCAmelCase ( self : Tuple ) ->Tuple:
        """No lower-casing, strip_accents disabled."""
        a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def __lowerCAmelCase ( self : int ) ->Optional[int]:
        """No lower-casing, strip_accents enabled."""
        a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def __lowerCAmelCase ( self : Any ) ->int:
        """never_split tokens pass through the BasicTokenizer untouched."""
        a = BasicTokenizer(do_lower_case=__UpperCAmelCase , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )

    def __lowerCAmelCase ( self : Union[str, Any] ) ->int:
        """WordpieceTokenizer splits into subwords and falls back to [UNK]."""
        a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        a = {}
        for i, token in enumerate(__UpperCAmelCase ):
            a = i
        a = WordpieceTokenizer(vocab=__UpperCAmelCase , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )

    @require_torch
    def __lowerCAmelCase ( self : int ) ->int:
        """Integration: batch-encode with the hosted prophetnet checkpoint
        and check ids and tensor shapes."""
        a = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
        a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        a = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
        a = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors='''pt''' )
        self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
        a = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        self.assertEqual((2, 9) , batch.input_ids.shape )
        self.assertEqual((2, 9) , batch.attention_mask.shape )

    def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
        """_is_whitespace recognises spaces/tabs/newlines/NBSP only."""
        self.assertTrue(_is_whitespace(''' ''' ) )
        self.assertTrue(_is_whitespace('''\t''' ) )
        self.assertTrue(_is_whitespace('''\r''' ) )
        self.assertTrue(_is_whitespace('''\n''' ) )
        self.assertTrue(_is_whitespace('''\u00A0''' ) )
        self.assertFalse(_is_whitespace('''A''' ) )
        self.assertFalse(_is_whitespace('''-''' ) )

    def __lowerCAmelCase ( self : Any ) ->List[str]:
        """_is_control flags control characters but not whitespace."""
        self.assertTrue(_is_control('''\u0005''' ) )
        self.assertFalse(_is_control('''A''' ) )
        self.assertFalse(_is_control(''' ''' ) )
        self.assertFalse(_is_control('''\t''' ) )
        self.assertFalse(_is_control('''\r''' ) )

    def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
        """_is_punctuation covers ASCII punctuation, not letters or spaces."""
        self.assertTrue(_is_punctuation('''-''' ) )
        self.assertTrue(_is_punctuation('''$''' ) )
        self.assertTrue(_is_punctuation('''`''' ) )
        self.assertTrue(_is_punctuation('''.''' ) )
        self.assertFalse(_is_punctuation('''A''' ) )
        self.assertFalse(_is_punctuation(''' ''' ) )

    @slow
    def __lowerCAmelCase ( self : List[str] ) ->List[str]:
        """build_inputs_with_special_tokens appends [SEP] (id 102) after
        each sequence."""
        a = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
        a = tokenizer.encode('''sequence builders''' , add_special_tokens=__UpperCAmelCase )
        a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCAmelCase )
        a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
        a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_a + [102]
| 0 | 0 |
def lowercase__(__snake_case: float):
    """Return the surface area of a regular dodecahedron with the given
    edge length: A = 3 * sqrt(25 + 10*sqrt(5)) * edge**2.

    Raises:
        ValueError: if the edge is not a positive real number.

    Bugs fixed: the obfuscated original tested ``isinstance(edge, edge)``
    (the class-info argument was mangled), which raises TypeError, and ran
    the ``<= 0`` comparison before the type check, so non-numeric input
    also raised TypeError instead of ValueError.
    """
    edge = __snake_case
    # Check the type first so non-numeric input raises ValueError, not TypeError.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive.')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def lowercase__(__snake_case: float):
    """Return the volume of a regular dodecahedron with the given edge
    length: V = (15 + 7*sqrt(5)) / 4 * edge**3.

    Raises:
        ValueError: if the edge is not a positive real number.

    Bugs fixed: same as the surface-area helper -- ``isinstance(edge,
    edge)`` raised TypeError, and the type check ran after the numeric
    comparison.
    """
    edge = __snake_case
    # Check the type first so non-numeric input raises ValueError, not TypeError.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive.')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import table: symbol names per submodule, mirrored by the
# TYPE_CHECKING imports below so static type checkers see real names.
UpperCAmelCase__ = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: the modeling symbols are simply not exported.
    pass
else:
    # Bug fixed: this branch used to REBIND the whole import-structure name
    # to a plain list, dropping the configuration entry; register the
    # modeling submodule under its own key instead.
    UpperCAmelCase__["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]
if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Bug fixed: the imported name now matches the exported
        # "NllbMoeTop2Router" (the source had the typo NllbMoeTopaRouter).
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
    import sys

    # Bugs fixed: the lazy module was built from an undefined name
    # (``_import_structure``) and the result was never installed; pass the
    # table above and replace this module in sys.modules, as transformers'
    # package __init__ files do.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], UpperCAmelCase__, module_spec=__spec__
    )
| 0 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
# Module logger.  Bug fixed: the obfuscated source bound this to ``__A``
# and then immediately clobbered it with the checkpoint map below; the
# bogus ``: Tuple`` annotations (``Tuple`` is not even imported, so they
# raised NameError at import time) are dropped as well.
logger = logging.get_logger(__name__)

# Map of canonical BLOOM checkpoints to their hosted config files.  Kept
# under ``__A`` so the module's final binding of that name is unchanged.
__A = {
    "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
    "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
    "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
    "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
    "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
    "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class _a ( lowerCAmelCase):
    """Configuration class for a BLOOM model (layout, vocab and dropout
    hyper-parameters), following the transformers PretrainedConfig pattern.

    Bugs fixed from the obfuscated source: every ``__init__`` parameter
    shared the single name ``__UpperCamelCase`` (duplicate argument names
    are a SyntaxError), every value was bound to a throwaway local instead
    of an instance attribute, and ``kwargs.pop('n_embed', ...)`` read an
    undefined name as its default.  Parameter and attribute names are
    restored from the assignment/usage order; default values are unchanged.
    """

    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size: int = 2_5_0_8_8_0,
        hidden_size: int = 6_4,
        n_layer: int = 2,
        n_head: int = 8,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.0_2,
        use_cache: bool = True,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        apply_residual_connection_post_layernorm: bool = False,
        hidden_dropout: float = 0.0,
        attention_dropout: float = 0.0,
        pretraining_tp: int = 1,
        slow_but_exact: bool = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with the legacy n_embed kwarg.
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class _a ( lowerCAmelCase):
    """ONNX export configuration for BLOOM (with optional past-key-values).

    Bugs fixed from the obfuscated source: every member was named
    ``lowercase__`` (so only the last definition survived), signatures
    repeated one parameter name (duplicate argument names are a
    SyntaxError), and bodies read names that were never assigned
    (``common_inputs``, ``ordered_inputs``, ``batch``, ...), including
    ``self.num_attention_heads``/``self.num_layers`` which could not exist
    under the collapsed member names.  Names restored from usage; the
    exported behaviour (inverted past-value shape on axis 2, opset 13,
    atol 1e-3) is unchanged.
    """

    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: "PretrainedConfig",
        task: str = "default",
        patching_specs: "List[PatchingSpec]" = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> "Mapping[str, Mapping[int, str]]":
        """Dynamic-axis specification for the exporter's inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see:
            # https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        # BLOOM needs a slightly looser validation tolerance than default.
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: "Optional[TensorType]" = None,
    ) -> "Mapping[str, Any]":
        """Build exporter dummy inputs, appending zeroed past-key-values and
        a correspondingly widened attention mask when ``use_past`` is set."""
        common_inputs = super(lowerCAmelCase, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs the way they appear in forward().
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 1_3
| 260 |
def _a(a: int) -> list:
    """Return the ``a``-bit Gray code sequence as a list of integers.

    Consecutive entries differ in exactly one bit.

    Raises:
        ValueError: if ``a`` is negative.

    Bugs fixed: the obfuscated original read undefined names
    (``bit_count``, ``sequence``) and relied on a sibling
    ``gray_code_sequence_string`` that this file also (mis)names ``_a``;
    the string-sequence builder is nested here so the function is
    self-contained.
    """
    bit_count = a
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be positive''')

    def _strings(bits: int) -> list:
        # Recursive reflect-and-prefix construction of the n-bit Gray code.
        if bits == 0:
            return ["0"]
        if bits == 1:
            return ["0", "1"]
        smaller = _strings(bits - 1)
        # First half: prefix "0"; second half: reflected copy prefixed "1".
        return ["0" + s for s in smaller] + ["1" + s for s in reversed(smaller)]

    # Convert the binary strings to integers.
    return [int(code, 2) for code in _strings(bit_count)]
def _a(a: int) -> list:
    """Return the ``a``-bit Gray code sequence as binary strings.

    Built recursively: prefix "0" to the (a-1)-bit sequence, then "1" to
    its reversal, so consecutive codes differ in exactly one bit.

    Bugs fixed: the obfuscated original bound intermediates to the
    parameter name while reading undefined locals (``bit_count``,
    ``seq_len``, ``smaller_sequence``, ``sequence``) and recursed through
    an undefined sibling name; the recursion now calls ``_a`` itself.
    """
    bit_count = a
    # Base cases achieved when either n = 0 or n = 1.
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # length of the sequence: 2**bit_count
    # The recursive call generates the answer for bit_count - 1 bits.
    smaller_sequence = _a(bit_count - 1)
    sequence = []
    # Append 0 to the first half of the smaller sequence.
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # Append 1 to the second half, walking the smaller sequence backwards.
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 0 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class A :
    """State container for the flax Karras-VE scheduler.

    NOTE(review): obfuscation collapsed three distinct field names into the
    single repeated name ``A__``, so only the last binding survives.
    Upstream these are presumably num_inference_steps, schedule and
    timesteps -- confirm against diffusers' FlaxKarrasVeScheduler.
    """

    A__ = None
    A__ = None
    A__ = None  # sigma(t_i)

    @classmethod
    def lowerCamelCase__ (cls : Optional[int] ) -> Optional[int]:
        """Return a fresh, empty state instance (intended name: ``create``)."""
        return cls()
@dataclass
class A ( UpperCAmelCase__ ):
    """Scheduler step output (intended fields: prev_sample, derivative,
    state).

    NOTE(review): obfuscation collapsed the three annotated fields into a
    single repeated name ``A__`` with the placeholder annotation ``42``;
    only the last binding survives.
    """

    A__ = 42
    A__ = 42
    A__ = 42
class A ( UpperCAmelCase__ , UpperCAmelCase__ ):
    """Flax Karras et al. (2022) variance-exploding scheduler skeleton.

    NOTE(review): this block is machine-obfuscated and does not parse as
    written -- ``__init__`` and the step methods repeat the parameter name
    ``_UpperCAmelCase`` (duplicate argument names are a SyntaxError), both
    base classes are the same undefined name, every method is called
    ``lowerCamelCase__`` (later definitions shadow earlier ones), and
    results are bound to a throwaway ``lowercase__`` while the intended
    locals (``timesteps``, ``gamma``, ``eps``, ``sigma_hat``, ...) are
    read.  Code is kept byte-for-byte; docstrings record evident intent.
    """

    @property
    def lowerCamelCase__ (self : List[Any] ) -> Any:
        """Intended ``has_state``: this scheduler carries explicit state."""
        return True

    @register_to_config
    def __init__(self : Optional[int] , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 100 , _UpperCAmelCase : float = 1.007 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 0.05 , _UpperCAmelCase : float = 50 , ) -> int:
        """Config-registered constructor.  Intended parameters (order to be
        confirmed against diffusers): sigma_min, sigma_max, s_noise,
        s_churn, s_min, s_max."""
        pass

    def lowerCamelCase__ (self : List[Any] ) -> str:
        """Intended ``create_state``: return a fresh scheduler state."""
        return KarrasVeSchedulerState.create()

    def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : KarrasVeSchedulerState , _UpperCAmelCase : int , _UpperCAmelCase : Tuple = () ) -> KarrasVeSchedulerState:
        """Intended ``set_timesteps``: build the descending timestep array
        and the Karras sigma schedule, folded into a replaced state.
        NOTE(review): ``jnp.floataa`` is a mangled ``jnp.float32``."""
        lowercase__ = jnp.arange(0 , __UpperCAmelCase )[::-1].copy()
        lowercase__ = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=__UpperCAmelCase , schedule=jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) , timesteps=__UpperCAmelCase , )

    def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : KarrasVeSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : random.KeyArray , ) -> Tuple[jnp.ndarray, float]:
        """Intended ``add_noise_to_input``: bump sigma to sigma_hat per the
        churn schedule and add matching Gaussian noise to the sample."""
        if self.config.s_min <= sigma <= self.config.s_max:
            lowercase__ = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            lowercase__ = 0
        # sample eps ~ N(0, S_noise^2 * I)
        lowercase__ = random.split(__UpperCAmelCase , num=1 )
        lowercase__ = self.config.s_noise * random.normal(key=__UpperCAmelCase , shape=sample.shape )
        lowercase__ = sigma + gamma * sigma
        lowercase__ = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def lowerCamelCase__ (self : Dict , _UpperCAmelCase : KarrasVeSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Intended ``step``: one Euler step from sigma_hat to sigma_prev."""
        lowercase__ = sample_hat + sigma_hat * model_output
        lowercase__ = (sample_hat - pred_original_sample) / sigma_hat
        lowercase__ = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=__UpperCAmelCase , derivative=__UpperCAmelCase , state=__UpperCAmelCase )

    def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : KarrasVeSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Intended ``step_correct``: Heun second-order correction step."""
        lowercase__ = sample_prev + sigma_prev * model_output
        lowercase__ = (sample_prev - pred_original_sample) / sigma_prev
        lowercase__ = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=__UpperCAmelCase , derivative=__UpperCAmelCase , state=__UpperCAmelCase )

    def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : KarrasVeSchedulerState , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : int ) -> Union[str, Any]:
        """Intended ``add_noise``: not supported for this scheduler."""
        raise NotImplementedError()
| 305 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase_ ( unittest.TestCase ):
    """Round-trip tests for VisionTextDualEncoderProcessor (BERT tokenizer +
    ViT image processor).

    NOTE(review): this block is machine-obfuscated and non-functional as
    written: every method is named ``__lowerCAmelCase`` (later definitions
    shadow earlier ones after name mangling), values are bound to a
    throwaway local ``a`` instead of the names the code then reads
    (``self.tmpdirname``, ``self.vocab_file``, ``processor``, ...), and
    several bodies read an undeclared ``__UpperCAmelCase``.  Code kept
    byte-for-byte; docstrings record the evident intent.
    """

    def __lowerCAmelCase ( self : List[Any] ) ->Tuple:
        """Intended ``setUp``: create a temp dir holding a toy vocab file
        and an image-processor JSON config."""
        a = tempfile.mkdtemp()
        # fmt: off
        a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        a = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        a = os.path.join(self.tmpdirname , __UpperCAmelCase )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(__UpperCAmelCase , __UpperCAmelCase )

    def __lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : List[Any] ) ->int:
        """Intended ``get_tokenizer``: load the toy BERT tokenizer."""
        return BertTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def __lowerCAmelCase ( self : Union[str, Any] , **__UpperCAmelCase : Optional[int] ) ->Union[str, Any]:
        """Intended ``get_image_processor``: load the toy ViT processor."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def __lowerCAmelCase ( self : Tuple ) ->Any:
        """Intended ``tearDown``: remove the temp directory."""
        shutil.rmtree(self.tmpdirname )

    def __lowerCAmelCase ( self : str ) ->Dict:
        """Intended ``prepare_image_inputs``: one random PIL image.
        NOTE(review): ``np.uinta`` is a mangled ``np.uint8``."""
        a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        a = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def __lowerCAmelCase ( self : Any ) ->Tuple:
        """Save/load round-trip with default components preserves vocab,
        tokenizer class and image-processor config."""
        a = self.get_tokenizer()
        a = self.get_image_processor()
        a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        processor.save_pretrained(self.tmpdirname )
        a = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , __UpperCAmelCase )

    def __lowerCAmelCase ( self : int ) ->Dict:
        """Save/load round-trip with overridden special tokens and
        image-processor kwargs."""
        a = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        a = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
        a = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __UpperCAmelCase )

    def __lowerCAmelCase ( self : Tuple ) ->Dict:
        """processor(images=...) matches the bare image processor output."""
        a = self.get_image_processor()
        a = self.get_tokenizer()
        a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        a = self.prepare_image_inputs()
        a = image_processor(__UpperCAmelCase , return_tensors='''np''' )
        a = processor(images=__UpperCAmelCase , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def __lowerCAmelCase ( self : List[str] ) ->str:
        """processor(text=...) matches the bare tokenizer output."""
        a = self.get_image_processor()
        a = self.get_tokenizer()
        a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        a = '''lower newer'''
        a = processor(text=__UpperCAmelCase )
        a = tokenizer(__UpperCAmelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
        """processor(text, images) yields all expected keys; calling with no
        input raises."""
        a = self.get_image_processor()
        a = self.get_tokenizer()
        a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        a = '''lower newer'''
        a = self.prepare_image_inputs()
        a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with self.assertRaises(__UpperCAmelCase ):
            processor()

    def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
        """batch_decode is forwarded to the underlying tokenizer."""
        a = self.get_image_processor()
        a = self.get_tokenizer()
        a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        a = processor.batch_decode(__UpperCAmelCase )
        a = tokenizer.batch_decode(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )

    def __lowerCAmelCase ( self : Optional[Any] ) ->Dict:
        """Processor output keys match processor.model_input_names."""
        a = self.get_image_processor()
        a = self.get_tokenizer()
        a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        a = '''lower newer'''
        a = self.prepare_image_inputs()
        a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 0 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=1_3 , SCREAMING_SNAKE_CASE__ : List[str]=7 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : int=9_9 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : Dict=3_2 , SCREAMING_SNAKE_CASE__ : str=5 , SCREAMING_SNAKE_CASE__ : Any=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Any="last" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=0 , ) -> List[str]:
a_ : List[str] = parent
a_ : Any = batch_size
a_ : int = seq_length
a_ : Union[str, Any] = is_training
a_ : Dict = use_input_lengths
a_ : Union[str, Any] = use_token_type_ids
a_ : int = use_labels
a_ : Union[str, Any] = gelu_activation
a_ : List[str] = sinusoidal_embeddings
a_ : str = causal
a_ : Dict = asm
a_ : Dict = n_langs
a_ : List[Any] = vocab_size
a_ : Union[str, Any] = n_special
a_ : List[Any] = hidden_size
a_ : Optional[int] = num_hidden_layers
a_ : List[Any] = num_attention_heads
a_ : Optional[int] = hidden_dropout_prob
a_ : Tuple = attention_probs_dropout_prob
a_ : str = max_position_embeddings
a_ : Optional[Any] = type_sequence_label_size
a_ : Optional[Any] = initializer_range
a_ : List[Any] = num_labels
a_ : Union[str, Any] = num_choices
a_ : List[str] = summary_type
a_ : Tuple = use_proj
a_ : List[str] = scope
a_ : List[str] = bos_token_id
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
a_ : Any = None
if self.use_input_lengths:
a_ : List[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
a_ : Optional[Any] = None
if self.use_token_type_ids:
a_ : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
a_ : List[str] = None
a_ : int = None
a_ : Tuple = None
if self.use_labels:
a_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : Dict = ids_tensor([self.batch_size] , 2 ).float()
a_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
a_ : Tuple = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , ) -> List[str]:
a_ : List[Any] = XLMModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a_ : List[Any] = model(__UpperCAmelCase , lengths=__UpperCAmelCase , langs=__UpperCAmelCase )
a_ : Union[str, Any] = model(__UpperCAmelCase , langs=__UpperCAmelCase )
a_ : Any = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Any:
a_ : Union[str, Any] = XLMWithLMHeadModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a_ : str = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]:
a_ : Union[str, Any] = XLMForQuestionAnsweringSimple(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a_ : Dict = model(__UpperCAmelCase )
a_ : Optional[int] = model(__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
a_ : Dict = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , ) -> List[Any]:
a_ : Any = XLMForQuestionAnswering(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a_ : Union[str, Any] = model(__UpperCAmelCase )
a_ : List[str] = model(
__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , cls_index=__UpperCAmelCase , is_impossible=__UpperCAmelCase , p_mask=__UpperCAmelCase , )
a_ : Union[str, Any] = model(
__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , cls_index=__UpperCAmelCase , is_impossible=__UpperCAmelCase , )
((a_ ) , ) : Optional[Any] = result_with_labels.to_tuple()
a_ : Dict = model(__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
((a_ ) , ) : Optional[int] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Dict:
a_ : List[Any] = XLMForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a_ : Optional[Any] = model(__UpperCAmelCase )
a_ : Any = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , ) -> Optional[Any]:
a_ : int = self.num_labels
a_ : Any = XLMForTokenClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a_ : Any = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , ) -> str:
a_ : Any = self.num_choices
a_ : Union[str, Any] = XLMForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a_ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ : int = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
a_ : List[Any] = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) : Optional[Any] = config_and_inputs
a_ : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case__ : str = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
snake_case__ : Any = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
snake_case__ : str = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str]=False ) -> int:
a_ : Dict = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a_ : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
a_ : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
a_ : str = XLMModelTester(self )
a_ : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=3_7 )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
a_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
a_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
a_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
a_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : int=1 ) -> Dict:
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(
[isinstance(__UpperCAmelCase , __UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(__UpperCAmelCase ) )
self.assertEqual(len(__UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__UpperCAmelCase ):
# adds PAD dummy token
a_ : int = min_length + idx + 1
a_ : Optional[Any] = min_length + idx + 1
a_ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : int=1 ) -> Dict:
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(
[isinstance(__UpperCAmelCase , __UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(__UpperCAmelCase ) , )
self.assertEqual(len(__UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__UpperCAmelCase ):
# adds PAD dummy token
a_ : int = min_length + idx + 1
a_ : Dict = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__UpperCAmelCase ) , )
pass
@slow
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : int = XLMModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
a_ : List[Any] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(__UpperCAmelCase )
a_ : Optional[int] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=__UpperCAmelCase ) # the president
a_ : Union[str, Any] = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a_ : Optional[Any] = model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __UpperCAmelCase )
| 32 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def _a ( a :List[Any] ) -> Optional[int]:
a = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def _a ( a :List[Any] , a :Optional[int] ) -> Dict:
a = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def _a ( a :Any ) -> List[Any]:
a = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def _a ( ) -> Optional[int]:
a = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def _a(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint to the HuggingFace format and save it.

    The original definition used four parameters all named ``a`` (a SyntaxError)
    and every local was also named ``a``; names are reconstructed from usage.

    NOTE(review): this function calls ``cls_token``/``embeddings``/``attention``/
    ``final`` helpers; in this file those helpers were all renamed ``_a`` by the
    obfuscation and must be restored to their real names for this to run.

    Args:
        cvt_model: model name, e.g. "cvt-13", "cvt-21" or "cvt-w24"; the depth
            configuration is inferred from the two digits after "cvt-".
        image_size: input image size configured on the image processor.
        cvt_file_name: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for model + processor.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_000
    repo_id = "huggingface/label-files"
    idalabel = json.load(
        open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r")
    )
    # JSON keys are strings; the config expects integer class ids.
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    # The obfuscated source passed kwargs ``idalabel``/``labelaid``; the real
    # CvtConfig keyword arguments are ``id2label`` and ``label2id``.
    config = CvtConfig(num_labels=num_labels, id2label=idalabel, label2id=labelaid)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # presumably this configures the processor's resize edge — TODO confirm key name
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    # Build the full (hf_key, original_key) mapping, stage by stage.
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    # Copy every tensor from the original checkpoint under its HF key.
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # The obfuscated source assigned both the parser and the parsed args to
    # ``UpperCAmelCase__`` while the code below used ``parser``/``args``
    # (NameError); the names are restored from usage.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    # NOTE(review): the conversion function is (mis)named ``_a`` in this file;
    # it must be restored to ``convert_cvt_checkpoint`` for this call to resolve.
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 0 | 0 |
'''simple docstring'''
from math import sqrt
def a_(lowerCamelCase: int) -> bool:
    """Return True iff *lowerCamelCase* is a prime number.

    Uses trial division up to the (rounded) square root.

    Raises:
        AssertionError: if the input is not a non-negative int.
    """
    # The obfuscated original asserted ``isinstance(x, x)``, which raises
    # TypeError because the second argument must be a type; check against int.
    assert isinstance(lowerCamelCase, int) and (
        lowerCamelCase >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if lowerCamelCase <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(lowerCamelCase))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if lowerCamelCase % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def a_(lowerCamelCase: int) -> list:
    """Sieve of Eratosthenes: return all primes in [2, *lowerCamelCase*].

    Raises:
        AssertionError: if the input is not an int > 2.
    """
    # The obfuscated original asserted ``isinstance(x, x)`` (TypeError), used an
    # undefined ``n``, and assigned the zeroed entry to a local instead of the
    # list slot; all three are repaired here.
    assert isinstance(lowerCamelCase, int) and (lowerCamelCase > 2), "'N' must been an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, lowerCamelCase + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def a_(lowerCamelCase: int) -> list:
    """Return all primes in [2, *lowerCamelCase*] by testing each number.

    Raises:
        AssertionError: if the input is not an int > 2.
    """
    # The obfuscated original appended to an undefined ``ans`` and called an
    # ``is_prime`` helper that does not exist at module level (every function
    # in this file was renamed ``a_``); the primality test is inlined instead.
    assert isinstance(lowerCamelCase, int) and (lowerCamelCase > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, lowerCamelCase + 1):
        # inline trial-division primality check
        if all(number % divisor != 0 for divisor in range(2, int(round(sqrt(number))) + 1)):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def a_ ( lowerCamelCase : int ):
    """Return the prime factorization of *lowerCamelCase* as a list, with multiplicity.

    NOTE(review): relies on a sibling primality test named ``is_prime``
    elsewhere in this module.
    """
    assert isinstance(lowerCamelCase, int) and lowerCamelCase >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = lowerCamelCase
    if lowerCamelCase == 0 or lowerCamelCase == 1:
        ans.append(lowerCamelCase)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(lowerCamelCase):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(lowerCamelCase)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def a_ ( lowerCamelCase : int ):
    """Return the largest prime factor of *lowerCamelCase*.

    NOTE(review): relies on the sibling ``prime_factorization`` helper.
    """
    assert isinstance(lowerCamelCase, int) and (
        lowerCamelCase >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(lowerCamelCase)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def a_ ( lowerCamelCase : int ):
    """Return the smallest prime factor of *lowerCamelCase*.

    NOTE(review): relies on the sibling ``prime_factorization`` helper.
    """
    assert isinstance(lowerCamelCase, int) and (
        lowerCamelCase >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(lowerCamelCase)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def a_ ( lowerCamelCase : int ):
    """Return True iff *lowerCamelCase* is even."""
    assert isinstance(lowerCamelCase, int), "'number' must been an int"
    assert isinstance(lowerCamelCase % 2 == 0, bool), "compare bust been from type bool"
    return lowerCamelCase % 2 == 0
def a_ ( lowerCamelCase : int ):
    """Return True iff *lowerCamelCase* is odd."""
    assert isinstance(lowerCamelCase, int), "'number' must been an int"
    assert isinstance(lowerCamelCase % 2 != 0, bool), "compare bust been from type bool"
    return lowerCamelCase % 2 != 0
def a_ ( lowerCamelCase : int ):
    """Goldbach decomposition: return two primes whose sum is the even number *lowerCamelCase*.

    NOTE(review): relies on the sibling helpers ``is_even``, ``is_prime``
    and ``get_prime_numbers``.
    """
    assert (
        isinstance(lowerCamelCase, int) and (lowerCamelCase > 2) and is_even(lowerCamelCase)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(lowerCamelCase)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == lowerCamelCase:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == lowerCamelCase)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def a_ ( number_a : int , number_b : int ):
    """Euclidean algorithm: greatest common divisor of two non-negative ints.

    The original signature declared the same name for both parameters
    (a SyntaxError); they are restored as ``number_a``/``number_b``.
    """
    assert (
        isinstance(number_a, int)
        and isinstance(number_b, int)
        and (number_a >= 0)
        and (number_b >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_b != 0:
        rest = number_a % number_b
        number_a = number_b
        number_b = rest
    # precondition
    assert isinstance(number_a, int) and (
        number_a >= 0
    ), "'number' must been from type int and positive"
    return number_a
def a_ ( number_a : int , number_b : int ):
    """Least common multiple of two positive ints via their prime factorizations.

    The original signature declared the same name for both parameters
    (a SyntaxError); they are restored as ``number_a``/``number_b``.
    NOTE(review): relies on the sibling ``prime_factorization`` helper.
    """
    assert (
        isinstance(number_a, int)
        and isinstance(number_b, int)
        and (number_a >= 1)
        and (number_b >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number_a > 1 and number_b > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(number_a)
        prime_fac_b = prime_factorization(number_b)
    elif number_a == 1 or number_b == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(number_a, number_b)
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                # shared factor: take it with the larger multiplicity
                for _ in range(max(count_a, count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def a_ ( lowerCamelCase : int ):
    """Return the *lowerCamelCase*-th prime number (index 0 is 2).

    NOTE(review): relies on the sibling primality test ``is_prime``.
    """
    assert isinstance(lowerCamelCase, int) and (lowerCamelCase >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < lowerCamelCase:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def a_ ( p_number_a : int , p_number_b : int ):
    """Return all primes strictly between the primes *p_number_a* and *p_number_b*.

    The original signature declared the same name for both parameters
    (a SyntaxError); they are restored as ``p_number_a``/``p_number_b``.
    NOTE(review): relies on the sibling primality test ``is_prime``.
    """
    assert (
        is_prime(p_number_a) and is_prime(p_number_b) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_b:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_a
        and ans[len(ans) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def a_ ( lowerCamelCase : int ):
    """Return all positive divisors of *lowerCamelCase*, in ascending order."""
    assert isinstance(lowerCamelCase, int) and (lowerCamelCase >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, lowerCamelCase + 1):
        if lowerCamelCase % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == lowerCamelCase, "Error in function getDivisiors(...)"
    return ans
def a_ ( lowerCamelCase : int ):
    """Return True iff *lowerCamelCase* equals the sum of its proper divisors.

    NOTE(review): relies on the sibling ``get_divisors`` helper.
    """
    assert isinstance(lowerCamelCase, int) and (
        lowerCamelCase > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(lowerCamelCase)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == lowerCamelCase)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == lowerCamelCase
def a_ ( numerator : int , denominator : int ):
    """Reduce the fraction numerator/denominator and return it as a tuple.

    The original signature declared the same name for both parameters
    (a SyntaxError); they are restored as ``numerator``/``denominator``.
    NOTE(review): relies on a sibling ``gcd`` helper.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a_ ( lowerCamelCase : int ):
    """Iterative factorial of *lowerCamelCase* (0! == 1)."""
    assert isinstance(lowerCamelCase, int) and (lowerCamelCase >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, lowerCamelCase + 1):
        ans *= factor
    return ans
def a_ ( lowerCamelCase : int ):
    """Return the *lowerCamelCase*-th Fibonacci number, with fib(0) == fib(1) == 1."""
    assert isinstance(lowerCamelCase, int) and (lowerCamelCase >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fiba = 1
    ans = 1  # this will be return
    for _ in range(lowerCamelCase - 1):
        tmp = ans
        ans += fiba
        fiba = tmp
    return ans
| 4 |
from __future__ import annotations

# Type alias used by the solver functions below.
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _a ( a :Matrix , a :int , a :int , a :int ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _a ( a :Matrix ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _a ( a : Matrix ) -> Matrix | None:
    """Solve the sudoku grid *a* in place by backtracking; return the solved
    grid, or None when no solution exists.

    NOTE(review): relies on sibling helpers named ``find_empty_location``,
    ``is_safe`` and ``sudoku`` elsewhere in this module.
    """
    if location := find_empty_location(a):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return a
    for digit in range(1, 10):
        if is_safe(a, row, column, digit):
            a[row][column] = digit
            if sudoku(a) is not None:
                return a
            # undo the tentative assignment and backtrack
            a[row][column] = 0
    return None
def _a ( a :Matrix ) -> None:
for row in grid:
for cell in row:
print(a , end=''' ''' )
print()
if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        # bind the result to the name read below (the original discarded it)
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_A : Union[str, Any] =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ (box , width , height ) -> list:
    """Scale an absolute ``(x0, y0, x1, y1)`` pixel box to the 0-1000 range
    used by LayoutLM-style models.

    The original signature declared the same name for all three parameters
    (a SyntaxError); they are restored as box/width/height.
    """
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
def SCREAMING_SNAKE_CASE_ (image , lang , tesseract_config = None ) -> Any:
    """Run Tesseract OCR on *image* and return ``(words, normalized_boxes)``.

    The original signature declared the same name for all three parameters
    (a SyntaxError); they are restored as image/lang/tesseract_config.
    NOTE(review): relies on a sibling ``normalize_box`` helper and on the
    ``pytesseract``/``to_pil_image`` imports at the top of this module.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else """"""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="""dict""", config=tesseract_config)
    words, left, top, width, height = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class _lowercase ( BaseImageProcessor ):
    r"""
    Image processor for LayoutLMv2-style document models: optionally resizes
    images, flips channels RGB->BGR and (optionally) runs Tesseract OCR to
    produce words plus normalized bounding boxes.

    NOTE(review): the original declared ``class _lowercase(_lowercase)`` — a
    class cannot inherit from itself (NameError); judging by the imports the
    intended base is ``BaseImageProcessor``. The duplicated parameter names in
    the original method signatures (a SyntaxError) are restored from the
    attribute names the bodies assign to.
    """

    a = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ):
        """Store the default preprocessing configuration."""
        super().__init__(**kwargs)
        # Default target size matches the 224x224 checkpoint resolution.
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize *image* to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # delegates to the module-level ``resize`` image transform
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one image or a batch; returns a ``BatchFeature`` with
        ``pixel_values`` (plus ``words``/``boxes`` when OCR ran)."""
        # per-call arguments override the instance defaults
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 41 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ ( PipelineTesterMixin , unittest.TestCase ):
    """Fast tests for the Kandinsky V2.2 prior pipeline.

    NOTE(review): the original class body bound every attribute and method to
    a single obfuscated name (shadowing them all) and declared duplicate
    parameter names in ``get_dummy_inputs`` (a SyntaxError). The attribute and
    method names the bodies actually reference (``self.pipeline_class``,
    ``self.get_dummy_components`` etc.) are restored.
    """

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        """Hidden size shared by the dummy text/vision encoders."""
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents
        # will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        """Assemble the dummy sub-models the pipeline constructor expects."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on *device*."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        """The prior produces the pinned embedding slice on CPU."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 0 | 0 |
"""simple docstring"""
def _snake_case ( lowercase__ : int ) -> list[int]:
'''simple docstring'''
if num <= 0:
raise ValueError("""Input must be a positive integer""" )
lowerCAmelCase_ :str = [True] * (num + 1)
lowerCAmelCase_ :str = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , lowercase__ ):
lowerCAmelCase_ :Any = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # bind the parsed input to the name used below; the sieve above is
    # defined as ``_snake_case`` in this module, so call it by that name
    user_num = int(input('Enter a positive integer: ').strip())
    print(_snake_case(user_num))
| 84 |
from math import factorial

# Precomputed factorial of each decimal digit, keyed by the digit character;
# bound to the name the functions below actually read.
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def _a ( a :int ) -> int:
    """Return the sum of the factorials of the decimal digits of *a*
    (Project Euler 74 helper).

    NOTE(review): reads the module-level ``DIGIT_FACTORIAL`` lookup table.
    """
    # The original checked isinstance(a, a), which raises TypeError; the
    # intended check is against int.
    if not isinstance(a, int):
        raise TypeError('''Parameter number must be int''')
    if a < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''')
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(a))
def _a ( chain_length :int = 60 , number_limit :int = 1_000_000 ) -> int:
    """Count starting values below *number_limit* whose digit-factorial-sum
    chain contains exactly *chain_length* non-repeating terms (Project Euler 74).

    The original signature declared the same name for both parameters
    (a SyntaxError); they are restored as chain_length/number_limit.
    NOTE(review): relies on the sibling ``digit_factorial_sum`` helper.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('''Parameters chain_length and number_limit must be int''')
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            '''Parameters chain_length and number_limit must be greater than 0''')
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # the solver above is defined as ``_a`` in this module; call it by that name
    print(f"""{_a()}""")
| 0 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
    """Tests for ``MgpstrProcessor`` (tokenizer + ViT image processor).

    NOTE(review): the original bound every local and argument to a single
    obfuscated name (``__UpperCAmelCase``), leaving them undefined at
    runtime, and all methods shared one name. The names the bodies reference
    (``self.tmpdirname``, ``self.get_tokenizer`` etc.) and the standard
    unittest hook names (``setUp``/``tearDown``/``test_*``) are restored.
    """

    lowerCAmelCase :Optional[int] = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        """Write a character vocab and an image-processor config to a temp dir."""
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **_lowerCamelCase):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase)

    def get_image_processor(self, **_lowerCamelCase):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **_lowerCamelCase)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a single random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]
        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
def _a ( a :int = 100 ) -> int:
a = n * (n + 1) * (2 * n + 1) / 6
a = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
    # The solver in this file is named `_a`; the previous call targeted the
    # undefined name `solution` and raised NameError at script run time.
    print(f"""{_a() = }""")
| 0 | 0 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    # Fix: results of ArgumentParser(), parse_known_args(), rh.cluster() and
    # rsplit() were bound to throwaway names while the code below referenced
    # `parser`, `args`, `unknown`, `cluster` and `example_dir` (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument('--user', type=str, default='ubuntu')
    parser.add_argument('--host', type=str, default='localhost')
    parser.add_argument('--key_path', type=str, default=None)
    parser.add_argument('--instance', type=str, default='V100:1')
    parser.add_argument('--provider', type=str, default='cheapest')
    parser.add_argument('--use_spot', type=bool, default=False)
    parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError('Cannot specify both BYO and on-demand cluster args')
        cluster = rh.cluster(
            name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
        )
    else:
        cluster = rh.cluster(
            name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit('/', 1)[0]
    # Set up remote environment
    cluster.install_packages(['pip:./'])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
| 205 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the GroupViT model family: maps each submodule to
# the public names it defines so the heavy backends (torch/TF) are imported
# only on first attribute access. Fixes: the dict and backend lists were all
# bound to the same throwaway name (each assignment clobbering the previous),
# `_import_structure` was undefined at the `_LazyModule` call (NameError on
# import), and the lazy proxy was never installed into `sys.modules`.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch models are only exposed when torch is installed.
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow models are only exposed when TF is installed.
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__snake_case = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def lowerCAmelCase_(path, tmp_path) -> None:
    """`inspect_dataset` must copy the dataset's loading script into
    `tmp_path` without dragging along `__pycache__` artifacts.

    Fix: the obfuscated signature declared the same parameter name twice
    (a SyntaxError) and did not match the name declared in `parametrize`;
    the second argument is pytest's `tmp_path` fixture.
    """
    inspect_dataset(path, tmp_path)
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def lowerCAmelCase_(path, tmp_path) -> None:
    """`inspect_metric` mirrors `inspect_dataset` for metric scripts.

    Fix: duplicate parameter names (SyntaxError) replaced by the name
    `parametrize` declares plus pytest's `tmp_path` fixture.
    """
    inspect_metric(path, tmp_path)
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    '''path, config_name, expected_splits''' , [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] , )
def lowerCAmelCase_(path, config_name, expected_splits) -> None:
    """`get_dataset_config_info` reports the requested config and its splits.

    Fix: duplicate parameter names (SyntaxError) replaced by the names the
    `parametrize` decorator declares.
    """
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' , [
        ('''paws''', None, ValueError),
    ] , )
def lowerCAmelCase_(path, config_name, expected_exception) -> None:
    """A missing/ambiguous config name must raise the advertised exception.

    Fix: duplicate parameter names (SyntaxError); `pytest.raises` now
    receives the parametrized exception type rather than a mangled name.
    """
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    '''path, expected''' , [
        ('''squad''', '''plain_text'''),
        ('''acronym_identification''', '''default'''),
        ('''lhoestq/squad''', '''plain_text'''),
        ('''lhoestq/test''', '''default'''),
        ('''lhoestq/demo1''', '''lhoestq--demo1'''),
        ('''dalle-mini/wit''', '''dalle-mini--wit'''),
    ] , )
def lowerCAmelCase_(path, expected) -> None:
    """The expected config name is among those `get_dataset_config_names`
    discovers for the dataset at `path`.

    Fix: duplicate parameter names (SyntaxError) replaced by the names the
    `parametrize` decorator declares.
    """
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    '''path, expected_configs, expected_splits_in_first_config''' , [
        ('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
        ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
        ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
    ] , )
def lowerCAmelCase_(path, expected_configs, expected_splits_in_first_config) -> None:
    """`get_dataset_infos` lists every config of the dataset, and the first
    config carries the expected splits.

    Fix: duplicate parameter names (SyntaxError) replaced by the names the
    `parametrize` decorator declares.
    """
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    '''path, expected_config, expected_splits''' , [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] , )
def lowerCAmelCase_(path, expected_config, expected_splits) -> None:
    """The expected config appears in `get_dataset_infos` output and carries
    the expected splits.

    Fix: duplicate parameter names (SyntaxError) replaced by the names the
    `parametrize` decorator declares.
    """
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' , [
        ('''paws''', None, ValueError),
    ] , )
def lowerCAmelCase_(path, config_name, expected_exception) -> None:
    """`get_dataset_split_names` propagates config-resolution errors.

    Fix: duplicate parameter names (SyntaxError); `pytest.raises` now
    receives the parametrized exception type.
    """
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 348 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowercase_ ( lowercase , unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): this test class looks machine-mangled — every class
    # attribute is bound to the same name `__snake_case` (each assignment
    # clobbers the previous), every method/property shares the name
    # `__lowerCAmelCase` (only the last definition survives), locals are all
    # assigned to `a` while later lines use original names (`tokenizer`,
    # `pipe`, `image`, ...), and `__UpperCAmelCase` is referenced but never
    # defined. The original names need to be restored from the upstream
    # Shap-E pipeline test before this class can run; code is kept
    # byte-identical here.
    __snake_case = ShapEPipeline
    __snake_case = ['''prompt''']
    __snake_case = ['''prompt''']
    __snake_case = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    __snake_case = False

    @property
    def __lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
        """simple docstring"""
        # Presumably the text-embedder hidden size — TODO confirm upstream.
        return 32

    @property
    def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
        """simple docstring"""
        return 32

    @property
    def __lowerCAmelCase ( self : Any ) ->Tuple:
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
        """simple docstring"""
        return 8

    @property
    def __lowerCAmelCase ( self : Tuple ) ->str:
        """simple docstring"""
        # NOTE(review): assigns to `a` but returns `tokenizer` — unresolved rename.
        a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer

    @property
    def __lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
        """simple docstring"""
        torch.manual_seed(0 )
        a = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(__UpperCAmelCase )

    @property
    def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
        """simple docstring"""
        torch.manual_seed(0 )
        a = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 16,
            '''embedding_dim''': self.time_input_dim,
            '''num_embeddings''': 32,
            '''embedding_proj_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''num_layers''': 1,
            '''clip_embed_dim''': self.time_input_dim * 2,
            '''additional_embeddings''': 0,
            '''time_embed_act_fn''': '''gelu''',
            '''norm_in_type''': '''layer''',
            '''encoder_hid_proj_type''': None,
            '''added_emb_type''': None,
        }
        a = PriorTransformer(**__UpperCAmelCase )
        return model

    @property
    def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
        """simple docstring"""
        torch.manual_seed(0 )
        a = {
            '''param_shapes''': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            '''d_latent''': self.time_input_dim,
            '''d_hidden''': self.renderer_dim,
            '''n_output''': 12,
            '''background''': (
                0.1,
                0.1,
                0.1,
            ),
        }
        a = ShapERenderer(**__UpperCAmelCase )
        return model

    def __lowerCAmelCase ( self : List[Any] ) ->Any:
        """simple docstring"""
        a = self.dummy_prior
        a = self.dummy_text_encoder
        a = self.dummy_tokenizer
        a = self.dummy_renderer
        a = HeunDiscreteScheduler(
            beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=__UpperCAmelCase , clip_sample=__UpperCAmelCase , clip_sample_range=1.0 , )
        a = {
            '''prior''': prior,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''renderer''': renderer,
            '''scheduler''': scheduler,
        }
        return components

    def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str=0 ) ->Optional[int]:
        """simple docstring"""
        if str(__UpperCAmelCase ).startswith('''mps''' ):
            a = torch.manual_seed(__UpperCAmelCase )
        else:
            a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        a = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 32,
            '''output_type''': '''np''',
        }
        return inputs

    def __lowerCAmelCase ( self : Dict ) ->Optional[int]:
        """simple docstring"""
        a = '''cpu'''
        a = self.get_dummy_components()
        a = self.pipeline_class(**__UpperCAmelCase )
        a = pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
        a = output.images[0]
        a = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        a = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __lowerCAmelCase ( self : Dict ) ->Optional[Any]:
        """simple docstring"""
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
        """simple docstring"""
        a = torch_device == '''cpu'''
        a = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , )

    def __lowerCAmelCase ( self : str ) ->Optional[int]:
        """simple docstring"""
        a = self.get_dummy_components()
        a = self.pipeline_class(**__UpperCAmelCase )
        a = pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        a = 1
        a = 2
        a = self.get_dummy_inputs(__UpperCAmelCase )
        for key in inputs.keys():
            if key in self.batch_params:
                a = batch_size * [inputs[key]]
        a = pipe(**__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): same mangling as the class above — both methods share
    # the name `__lowerCAmelCase` (the first is clearly meant to be
    # `tearDown`, since it calls `super().tearDown()`, the second a slow
    # integration test); locals are bound to `a` while later lines use the
    # original names, and `__UpperCAmelCase` is undefined. Restore from the
    # upstream Shap-E test file before running.
    def __lowerCAmelCase ( self : int ) ->Any:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
        """simple docstring"""
        a = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_np_out.npy''' )
        a = ShapEPipeline.from_pretrained('''openai/shap-e''' )
        a = pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
        a = pipe(
            '''a shark''' , generator=__UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 0 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__snake_case : int = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class lowerCamelCase :
    '''simple docstring'''

    # NOTE(review): mangled Flax Pegasus model-tester. The `__init__`
    # signature repeats the parameter name `lowerCAmelCase_` (duplicate
    # parameter names are a SyntaxError), locals are assigned to `A__` while
    # the bodies use the original names, and `__UpperCAmelCase` is
    # referenced but never defined. Restore the original signatures and
    # local names from the upstream test file; code kept byte-identical.
    __snake_case = PegasusConfig
    __snake_case = {}
    __snake_case = 'gelu'

    def __init__( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Optional[int]=99 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : str=5 , lowerCAmelCase_ : List[str]=4 , lowerCAmelCase_ : List[Any]=37 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : str=20 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : List[Any]=0 , ) -> Union[str, Any]:
        '''simple docstring'''
        A__ : List[Any] =parent
        A__ : Dict =batch_size
        A__ : int =seq_length
        A__ : Optional[Any] =is_training
        A__ : Tuple =use_labels
        A__ : List[Any] =vocab_size
        A__ : Optional[int] =hidden_size
        A__ : int =num_hidden_layers
        A__ : List[Any] =num_attention_heads
        A__ : Dict =intermediate_size
        A__ : List[Any] =hidden_dropout_prob
        A__ : Dict =attention_probs_dropout_prob
        A__ : List[Any] =max_position_embeddings
        A__ : Union[str, Any] =eos_token_id
        A__ : Optional[Any] =pad_token_id
        A__ : List[str] =bos_token_id

    def lowercase__ ( self : Tuple ) -> int:
        '''simple docstring'''
        A__ : List[str] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        A__ : Optional[int] =np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        A__ : Optional[Any] =np.concatenate([input_ids, eos_tensor] , axis=1 )
        A__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : List[Any] =self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        A__ : str =prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        return config, inputs_dict

    def lowercase__ ( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] ) -> List[Any]:
        '''simple docstring'''
        A__ : int =20
        A__ : Tuple =model_class_name(__UpperCAmelCase )
        A__ : Optional[Any] =model.encode(inputs_dict["""input_ids"""] )
        A__ , A__ : Dict =(
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        A__ : str =model.init_cache(decoder_input_ids.shape[0] , __UpperCAmelCase , __UpperCAmelCase )
        A__ : Any =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
        A__ : str =jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        A__ : List[str] =model.decode(
            decoder_input_ids[:, :-1] , __UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , decoder_position_ids=__UpperCAmelCase , )
        A__ : Any =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
        A__ : List[str] =model.decode(
            decoder_input_ids[:, -1:] , __UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCAmelCase , )
        A__ : Tuple =model.decode(__UpperCAmelCase , __UpperCAmelCase )
        A__ : Any =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" )

    def lowercase__ ( self : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple ) -> str:
        '''simple docstring'''
        A__ : Optional[Any] =20
        A__ : str =model_class_name(__UpperCAmelCase )
        A__ : str =model.encode(inputs_dict["""input_ids"""] )
        A__ , A__ : Dict =(
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        A__ : Optional[Any] =jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        A__ : Optional[int] =model.init_cache(decoder_input_ids.shape[0] , __UpperCAmelCase , __UpperCAmelCase )
        A__ : Optional[int] =jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        A__ : str =model.decode(
            decoder_input_ids[:, :-1] , __UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , decoder_position_ids=__UpperCAmelCase , )
        A__ : Dict =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
        A__ : Dict =model.decode(
            decoder_input_ids[:, -1:] , __UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCAmelCase , decoder_position_ids=__UpperCAmelCase , )
        A__ : Dict =model.decode(__UpperCAmelCase , __UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase )
        A__ : Optional[Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Assemble the model-input dict for a Pegasus forward pass.

    Fixes: the obfuscated signature repeated the same parameter name
    (a SyntaxError), the body referenced the original names, the name had
    drifted from what the tester at the call site above uses
    (``prepare_pegasus_inputs_dict``), and ``np.inta`` is not a NumPy
    dtype — the intended dtype is ``np.int8``.

    :param config: model config; only ``config.pad_token_id`` is read here.
    :param input_ids: encoder token ids, shape (batch, src_len).
    :param decoder_input_ids: decoder token ids, shape (batch, tgt_len).
    :param attention_mask: optional encoder mask; derived from padding if None.
    :param decoder_attention_mask: optional decoder mask; derived if None.
    :return: dict with input_ids / decoder_input_ids and both masks.
    """
    if attention_mask is None:
        # 1 wherever the token is not padding, 0 on pad positions.
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        # First decoder position is always attended; the rest follow padding.
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): mangled Flax Pegasus model tests. All class attributes
    # are bound to the same name `__snake_case`, `FlaxPegasusModelTester` is
    # referenced although the tester class above was renamed, and locals
    # use the `A__` / `__UpperCAmelCase` placeholder scheme. Restore from
    # the upstream test file; code kept byte-identical below.
    __snake_case = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    __snake_case = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    __snake_case = True
    __snake_case = False
    __snake_case = False
    __snake_case = False

    def lowercase__ ( self : str ) -> List[Any]:
        '''simple docstring'''
        A__ : Dict =FlaxPegasusModelTester(self )
        A__ : str =ConfigTester(self , config_class=__UpperCAmelCase )

    def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def lowercase__ ( self : Union[str, Any] ) -> int:
        '''simple docstring'''
        A__ , A__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

    def lowercase__ ( self : Any ) -> Optional[int]:
        '''simple docstring'''
        A__ , A__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

    def lowercase__ ( self : Any ) -> Optional[int]:
        '''simple docstring'''
        A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                A__ : Union[str, Any] =self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
                A__ : int =model_class(__UpperCAmelCase )

                @jax.jit
                def encode_jitted(lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : List[Any] ):
                    return model.encode(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase )

                with self.subTest("""JIT Enabled""" ):
                    A__ : List[Any] =encode_jitted(**__UpperCAmelCase ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        A__ : Optional[Any] =encode_jitted(**__UpperCAmelCase ).to_tuple()
                self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
                for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def lowercase__ ( self : List[Any] ) -> Optional[Any]:
        '''simple docstring'''
        A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                A__ : Optional[Any] =model_class(__UpperCAmelCase )
                A__ : str =model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
                A__ : Dict ={
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ):
                    return model.decode(
                        decoder_input_ids=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , encoder_outputs=__UpperCAmelCase , )

                with self.subTest("""JIT Enabled""" ):
                    A__ : int =decode_jitted(**__UpperCAmelCase ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        A__ : Optional[Any] =decode_jitted(**__UpperCAmelCase ).to_tuple()
                self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
                for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def lowercase__ ( self : str ) -> List[str]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            A__ : Dict =model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__UpperCAmelCase )
            A__ : Any =np.ones((1, 1) )
            A__ : Any =model(__UpperCAmelCase )
            self.assertIsNotNone(__UpperCAmelCase )

    @slow
    def lowercase__ ( self : str ) -> Optional[Any]:
        '''simple docstring'''
        A__ : Tuple =FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
        A__ : Union[str, Any] =PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
        A__ : List[Any] =[
            """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
            """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
        ]
        A__ : Dict =[
            """California\'s largest electricity provider has turned off power to hundreds of thousands of customers.""",
            """Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.""",
        ]
        A__ : Optional[int] =tokenizer(__UpperCAmelCase , return_tensors="""np""" , truncation=__UpperCAmelCase , max_length=5_12 , padding=__UpperCAmelCase )
        A__ : str =model.generate(**__UpperCAmelCase , num_beams=2 ).sequences
        A__ : List[str] =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
        assert tgt_text == decoded
| 134 |
from __future__ import annotations
import time
import numpy as np
# Banker's-algorithm demo data.
# NOTE(review): all three tables below are bound to the same mangled name
# `UpperCAmelCase__`, so only the last assignment survives at module level —
# the original names (claim_vector / allocated_resources_table /
# maximum_claim_table) need to be restored before these can be used.
# Total units of each of the four resource types the system owns.
UpperCAmelCase__ = [8, 5, 9, 7]
# Row i: units of each resource currently allocated to process i.
UpperCAmelCase__ = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
# Row i: maximum units of each resource process i may ever claim.
UpperCAmelCase__ = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class lowercase_ :
    """Banker's algorithm for deadlock avoidance.

    Tracks a system-wide claim vector plus per-process allocation and
    maximum-claim tables, and simulates processes to completion only while
    each process's remaining need fits in the currently available resources.

    Fix: every private helper had been renamed to the single colliding name
    ``__lowerCAmelCase`` (so only the last definition survived) while the
    call sites still used ``self.__need()`` etc., and several loop bodies
    referenced the undefined placeholder ``__UpperCAmelCase``. The original
    method names and local wiring are restored here.
    """

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Per-resource total currently allocated across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Units of each resource still free (claim minus total allocated)."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Remaining need per process: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process index to its need row, so the original process
        number can be recovered after rows are removed from the work list."""
        # Compute the need table once instead of once per row.
        need_rows = self.__need()
        return {need_rows.index(need): need for need in need_rows}

    def main(self, **kwargs) -> None:
        """Run the safety simulation, printing each process as it executes
        and whether the system remains in a safe state.

        Pass any truthy keyword (e.g. ``describe=True``) to print the data
        tables first.
        """
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('''_''' * 50 + '''\n''')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(F"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        '''Updated available resource stack for processes: '''
                        + ''' '''.join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print('''The process is in a safe state.\n''')
            else:
                print('''System in unsafe state. Aborting...\n''')
                break

    def __pretty_data(self) -> None:
        """Pretty-print the allocation/maximum tables plus current usage."""
        print(''' ''' * 9 + '''Allocated Resource Table''')
        for item in self.__allocated_resources_table:
            print(
                F"""P{self.__allocated_resources_table.index(item) + 1}"""
                + ''' '''.join(F"""{it:>8}""" for it in item)
                + '''\n''')
        print(''' ''' * 9 + '''System Resource Table''')
        for item in self.__maximum_claim_table:
            print(
                F"""P{self.__maximum_claim_table.index(item) + 1}"""
                + ''' '''.join(F"""{it:>8}""" for it in item)
                + '''\n''')
        print(
            '''Current Usage by Active Processes: '''
            + ''' '''.join(str(x) for x in self.__claim_vector))
        print(
            '''Initial Available Resources: '''
            + ''' '''.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 0 |
from __future__ import annotations
import time
import numpy as np
# Demo data: total system claim vector, current per-process allocations and
# per-process maximum claims (rows = processes, columns = resource types).
# NOTE(review): all three share one garbled name, so only the last assignment
# survives; nothing below reads them, but the names should be split.
__UpperCAmelCase = [8, 5, 9, 7]
__UpperCAmelCase = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
__UpperCAmelCase = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class lowerCamelCase:
    """Banker's algorithm: decide whether a set of processes can all run to
    completion (a *safe state*) given the system claim vector, the current
    allocation table and each process's maximum claim table."""

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        """Store the claim vector and the allocation / maximum-claim tables."""
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Column-wise total of resources currently allocated to processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still free: claim vector minus everything allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need row back to its original process index."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety check, printing each process as it executes.

        Pass any truthy keyword (e.g. ``describe=True``) to also print the
        allocation / maximum-claim tables first.
        """
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    process_number = -1
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break

    def __pretty_data(self) -> None:
        """Print the allocation / maximum-claim tables and resource vectors."""
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + ' '.join(f"{it:>8}" for it in item)
                + '\n'
            )
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + ' '.join(f"{it:>8}" for it in item)
                + '\n'
            )
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector)
        )
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 29 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
# Resource maps for the fast ELECTRA tokenizer. The class below references
# these names, so they must be distinct module-level constants.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}


class lowercase_(PreTrainedTokenizerFast):
    """Fast ELECTRA tokenizer backed by the HuggingFace *tokenizers* library
    (WordPiece), mirroring the BERT fast tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` id sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 0 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__A : List[str] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class _a(ChunkPipeline):
    """Zero-shot object detection pipeline: detect objects in an image that
    match free-text candidate labels, one forward pass per label."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        """Detect `candidate_labels` in `image`; returns score/label/box dicts."""
        # Backwards compatibility with the earlier `text_queries` argument name.
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        """Route `threshold` / `top_k` kwargs to the postprocess step."""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        """Yield one tokenized (label, image-features) chunk per candidate label."""
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        # NOTE(review): original dtype was digit-masked ("intaa"); int32 assumed.
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        """Run the model on one chunk, carrying bookkeeping fields through."""
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        """Convert raw detections to sorted {score, label, box} dicts."""
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                results.append({"score": score, "label": label, "box": box})

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn an `(xmin, ymin, xmax, ymax)` tensor into an int dict."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 260 |
def _a ( a :int ) -> bool:
a = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 0 | 0 |
def UpperCamelCase(__magic_name__: int) -> bool:
    """Return True iff the argument is a perfect cube (``k ** 3``, k integer).

    Rounds the floating cube root before cubing back, since e.g.
    ``27 ** (1 / 3)`` is ``3.0000000000000004`` and a naive float comparison
    would wrongly return False. Negatives are handled via the absolute value.
    """
    val = round(abs(__magic_name__) ** (1 / 3))
    return val * val * val == abs(__magic_name__)


if __name__ == "__main__":
    print(UpperCamelCase(2_7))
    print(UpperCamelCase(4))
| 305 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ = pytest.mark.integration
@require_faiss
class lowercase_(TestCase):
    """Integration tests: adding and querying FAISS / Elasticsearch indexes
    on a `datasets.Dataset`."""

    def _create_dummy_dataset(self):
        # 30 rows named my_name-train_0 ... my_name-train_29
        dset = Dataset.from_dict(
            {"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]}
        )
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)},
            with_indices=True,
            keep_in_memory=True,
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        # row 29 has the largest vector, so it maximises the inner product
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class lowercase_(TestCase):
    """Unit tests for the low-level `FaissIndex` wrapper."""

    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        # string_factory and custom_index are mutually exclusive
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def _a(mockfs):
    """Round-trip a `FaissIndex` through an fsspec mock filesystem.

    `mockfs` is a pytest fixture providing `.storage_options` for the
    ``mock://`` protocol (the garbled parameter name broke fixture lookup).
    """
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class lowercase_(TestCase):
    """Unit tests for `ElasticSearchIndex` with a fully mocked ES client."""

    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 0 | 0 |
def SCREAMING_SNAKE_CASE_() -> str:
    """Project Euler 48: return the last ten digits of 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(SCREAMING_SNAKE_CASE_())
| 32 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class lowercase_(PretrainedConfig):
    """Configuration for a T5 model. Defaults follow the small T5 checkpoints;
    `feed_forward_proj` may be a plain activation ("relu") or a gated one
    ("gated-gelu")."""

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=512,
        d_kv=64,
        d_ff=2_048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # Parse "<gated->ACT_FN" into the dense activation and gating flag.
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\''''
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class lowercase_(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for T5 (seq2seq, with optional past key/values)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for encoder/decoder inputs; decoder axes depend on
        whether past key/values are exported."""
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset known to support this export."""
        return 13
| 0 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCAmelCase_:
    """Binary-tree node; `data` is the number of coins held at the node."""

    data: int
    left: UpperCAmelCase_ | None = None
    right: UpperCAmelCase_ | None = None


# (moves, excess): moves made in a subtree, and that subtree's coin surplus
# (can be negative) after every node in it holds exactly one coin.
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def a_(root: UpperCAmelCase_ | None):
    """Minimum number of single-coin moves so every node ends with one coin.

    Each move transfers one coin along one edge (LeetCode 979, "Distribute
    Coins in Binary Tree"). Raises ValueError if the coin count does not equal
    the node count, since an exact distribution is then impossible.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: UpperCAmelCase_ | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: UpperCAmelCase_ | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError('The nodes number should be same as the number of coins')

    # Main calculation
    def get_distrib(node: UpperCAmelCase_ | None) -> CoinsDistribResult:
        if node is None:
            # An empty subtree "owes" one coin to its parent slot.
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 4 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class lowercase_(PretrainedConfig):
    """Configuration for a LLaMA model; defaults correspond to the 7B
    checkpoint (32 layers / heads, 4096 hidden size)."""

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility: grouped-query attention defaults to MHA
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate `rope_scaling`: either None or a {"type", "factor"} dict
        with type in {"linear", "dynamic"} and a float factor > 1."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 0 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_A : Union[str, Any] =logging.get_logger(__name__)
_A : Union[str, Any] ={'''tokenizer_file''': '''tokenizer.json'''}
_A : List[Any] ={
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class _lowercase(PreTrainedTokenizerFast):
    """
    Fast (HuggingFace *tokenizers*-backed) tokenizer for BLOOM.

    There is no slow counterpart, hence ``slow_tokenizer_class`` is ``None``.
    Fixes vs. previous revision: the class inherited itself (NameError), the
    ``__init__`` signature repeated one parameter name (SyntaxError), and the
    class attributes / overriding methods all shared a single name so the
    base-class contract (``vocab_files_names``, ``_batch_encode_plus``, ...)
    was never actually fulfilled.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer if its serialized add_prefix_space
        # setting disagrees with the one requested here.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # Pre-tokenized input only works when a prefix space is added.
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, keeping at most model_max_length tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            # Keep only the most recent context.
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 41 |
from __future__ import annotations
# Module metadata: the previous revision assigned all six values to one
# throwaway name; restore the conventional dunder attributes.
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : str ) ->None:
"""simple docstring"""
super().__init__()
a = []
a = domain
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : list[tuple[str, str | None]] ) ->None:
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
a = parse.urljoin(self.domain , __UpperCAmelCase )
self.urls.append(__UpperCAmelCase )
def _a ( a :str ) -> str:
return ".".join(get_sub_domain_name(a ).split('''.''' )[-2:] )
def _a ( a :str ) -> str:
return parse.urlparse(a ).netloc
def _a ( a :str = "https://github.com" ) -> list[str]:
a = get_domain_name(a )
# Initialize the parser
a = Parser(a )
try:
# Open URL
a = requests.get(a )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
a = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
a = requests.get(a )
# Get the valid email.
a = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(a )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(a )
if __name__ == "__main__":
UpperCAmelCase__ = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
| 0 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE(PerceiverImageProcessor):
    """Deprecated alias of PerceiverImageProcessor; warns on construction.

    Fixes vs. previous revision: ``*__A, **__A`` duplicated a parameter name
    (SyntaxError) and the warning category was an undefined name.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 84 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    """Return the value of the -f argument (path of the invoking test file)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    """Load ``all_results.json`` from *output_dir*; raise ValueError if absent."""
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    """True when running on CUDA and NVIDIA apex is importable (enables --fp16)."""
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase_(TestCasePlus):
    """End-to-end smoke tests for the PyTorch ``*_no_trainer`` example scripts,
    each launched as a subprocess via ``accelerate launch``.

    Fixes vs. previous revision: the base class was an undefined name, every
    method shared one private (name-mangled) identifier so unittest never
    collected any test, and all locals were collapsed into a single ``a`` so
    the f-string arguments referenced undefined ``{tmp_dir}``/``{epochs}``.
    """

    @classmethod
    def setUpClass(cls):
        # One shared accelerate config file for every test in the class.
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # With multiple GPUs the dataset is tiny, so train longer to converge.
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        # Mirror the subprocess output into the test log for easier debugging.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 0 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _snake_case(PipelineTool):
    """Agent tool producing an English caption for an input image with BLIP.

    Fixes vs. previous revision: undefined base class, duplicated ``__init__``
    parameter name (SyntaxError), and the three pipeline hooks all shared one
    method name so only the last definition survived.
    """

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Image preprocessing requires the vision extras (PIL, etc.).
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image"):
        """Convert the PIL image into model-ready tensors."""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """Run autoregressive caption generation."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the generated ids to a clean caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
import math
def prime_sieve(n: int) -> list:
    """Return all primes strictly below ``n`` (requires n >= 3), odd-only sieve."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Project Euler 234: sum of all semidivisible numbers not exceeding ``limit``.

    A number n is semidivisible when exactly one of lps(n) (largest prime
    <= sqrt(n)) and ups(n) (smallest prime >= sqrt(n)) divides n. For each
    consecutive prime pair (lps, ups) the candidates lie in (lps^2, ups^2).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
| 0 | 0 |
from math import factorial
# Pre-computed factorial of every decimal digit, keyed by its character.
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of ``number``."""
    if not isinstance(number, int):
        raise TypeError('Parameter number must be int')

    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0')

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """Project Euler 74: count starting numbers below ``number_limit`` whose
    digit-factorial chain contains exactly ``chain_length`` non-repeating terms.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('Parameters chain_length and number_limit must be int')

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0')

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(f"{solution()}")
| 205 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return *price* with *tax_rate* applied (e.g. 0.25 for a 25% tax)."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 0 | 0 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    # Stable numeric identifiers for the scheduler implementations that are
    # interchangeable within the Karras-style diffusion pipelines.
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    """Base output class for a scheduler's ``step`` call."""

    prev_sample: torch.FloatTensor


class SchedulerMixin:
    """Mixin providing config-driven save/load plumbing for schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    # Names of scheduler classes this one can be swapped with.
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """Instantiate a scheduler from a saved ``scheduler_config.json``."""
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """Write the scheduler configuration to *save_directory*."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes this scheduler is interchangeable with."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names against the top-level package so only
        # classes that actually exist in this installation are returned.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
| 348 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowercase_(TestCase):
    """Tests for FeaturesManager.determine_framework: explicit argument,
    local-checkpoint detection, and environment-based fallback.

    Fixes vs. previous revision: undefined base class, all methods collapsed
    onto one private (name-mangled) identifier so unittest collected nothing,
    and mangled names where True/False/exception classes were intended.
    """

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, model_path):
        # Save a PyTorch-only checkpoint into model_path.
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path):
        # Save a TensorFlow-only checkpoint into model_path.
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            # NOTE(review): empty directory has no weights file — confirm the
            # raised type matches determine_framework's behavior.
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 0 | 0 |
'''simple docstring'''
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 173: count hollow square laminae formable with up to
    ``limit`` tiles.

    A lamina with outer width w and square hole of width h (same parity,
    h >= 1) uses w**2 - h**2 tiles; count all (w, h) pairs within the budget.
    """
    answer = 0
    # A one-tile-thick lamina of width w uses 4*(w-1) tiles, bounding w.
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            # Hole must be large enough that w^2 - h^2 <= limit.
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # Hole and outer square must share parity for a uniform border.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
| 134 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = ProphetNetTokenizer
__snake_case = False
def __lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
super().setUp()
a = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str ) ->Dict:
"""simple docstring"""
a = '''UNwant\u00E9d,running'''
a = '''unwanted, running'''
return input_text, output_text
def __lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
a = self.tokenizer_class(self.vocab_file )
a = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__UpperCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def __lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
a = {}
for i, token in enumerate(__UpperCAmelCase ):
a = i
a = WordpieceTokenizer(vocab=__UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
@require_torch
def __lowerCAmelCase ( self : int ) ->int:
"""simple docstring"""
a = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
a = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
a = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __lowerCAmelCase ( self : Any ) ->List[str]:
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
def __lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
a = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
a = tokenizer.encode('''sequence builders''' , add_special_tokens=__UpperCAmelCase )
a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCAmelCase )
a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 0 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """Output of the Flax ControlNet: per-resolution down-block residuals plus
    the mid-block residual.

    The class and field names are pinned by the keyword construction
    ``FlaxControlNetOutput(down_block_res_samples=..., mid_block_res_sample=...)``
    at the end of the model's ``__call__``; the previous obfuscated version
    declared two fields under the same name (the second shadowed the first).
    """

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    """Encodes the conditioning image into a feature map matching the UNet's
    first stage.

    Name pinned by the instantiation site in the main model's setup; field
    names pinned by the ``self.block_out_channels`` /
    ``self.conditioning_embedding_channels`` reads in the body below.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        # First conv keeps spatial size; the loop then alternates a
        # same-resolution conv with a stride-2 downsampling conv per stage.
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            blocks.append(
                nn.Conv(
                    channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) )
            blocks.append(
                nn.Conv(
                    channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) )
        self.blocks = blocks
        # Zero-initialized projection so the ControlNet starts contributing nothing.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__(self, conditioning) -> jnp.ndarray:
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )
        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )
        return self.conv_out(embedding )
@flax_register_to_config
class lowerCamelCase (nn.Module , _snake_case , _snake_case ):
    """Flax ControlNet model (UNet encoder copy producing residual control signals).

    NOTE(review): identifiers here look machine-mangled — every config field is
    declared as ``_snake_case`` (so later fields shadow earlier ones and reads
    such as ``self.in_channels`` / ``self.block_out_channels`` resolve to
    nothing), the setup method is not named ``setup``, the base-class names
    ``_snake_case`` are undefined at module scope, and ``jnp.floataa`` /
    ``jnp.intaa`` are not real jnp dtypes. Restoring the original names is a
    coordinated change across the whole class, so only documentation is added
    in this pass.
    """
    _snake_case : Dict = 3_2
    _snake_case : int = 4
    _snake_case : str = (
        '''CrossAttnDownBlock2D''',
        '''CrossAttnDownBlock2D''',
        '''CrossAttnDownBlock2D''',
        '''DownBlock2D''',
    )
    _snake_case : List[str] = False
    _snake_case : List[str] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
    _snake_case : List[str] = 2
    _snake_case : Any = 8
    _snake_case : Optional[int] = None
    _snake_case : str = 1_2_8_0
    _snake_case : Optional[Any] = 0.0
    _snake_case : Union[str, Any] = False
    _snake_case : Optional[int] = jnp.floataa
    _snake_case : Optional[Any] = True
    _snake_case : Optional[Any] = 0
    _snake_case : Optional[Any] = '''rgb'''
    _snake_case : Optional[Any] = (1_6, 3_2, 9_6, 2_5_6)

    def __UpperCAmelCase ( self , _UpperCamelCase ) -> FrozenDict:
        # init_weights equivalent: build dummy sample/timestep/hidden-state/cond
        # inputs and run ``self.init`` to obtain the parameter FrozenDict.
        UpperCAmelCase_ : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
        UpperCAmelCase_ : Dict = jnp.zeros(__UpperCAmelCase , dtype=jnp.floataa )
        UpperCAmelCase_ : Union[str, Any] = jnp.ones((1,) , dtype=jnp.intaa )
        UpperCAmelCase_ : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        # Conditioning image is 8x the latent resolution (VAE downscale factor).
        UpperCAmelCase_ : Tuple = (1, 3, self.sample_size * 8, self.sample_size * 8)
        UpperCAmelCase_ : Union[str, Any] = jnp.zeros(__UpperCAmelCase , dtype=jnp.floataa )
        UpperCAmelCase_ , UpperCAmelCase_ : int = jax.random.split(__UpperCAmelCase )
        UpperCAmelCase_ : str = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )["params"]

    def __UpperCAmelCase ( self ) -> List[str]:
        # Module construction (flax ``setup``-style): input conv, time embedding,
        # conditioning embedding, down blocks with zero-init ControlNet convs, mid block.
        UpperCAmelCase_ : Any = self.block_out_channels
        UpperCAmelCase_ : List[str] = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        UpperCAmelCase_ : Dict = self.num_attention_heads or self.attention_head_dim

        # input
        UpperCAmelCase_ : Any = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        # time
        UpperCAmelCase_ : Any = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        UpperCAmelCase_ : Dict = FlaxTimestepEmbedding(__UpperCAmelCase , dtype=self.dtype )
        UpperCAmelCase_ : List[Any] = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        UpperCAmelCase_ : Optional[int] = self.only_cross_attention
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            UpperCAmelCase_ : Tuple = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            UpperCAmelCase_ : Tuple = (num_attention_heads,) * len(self.down_block_types )

        # down
        UpperCAmelCase_ : int = []
        UpperCAmelCase_ : List[Any] = []
        UpperCAmelCase_ : List[str] = block_out_channels[0]
        # Zero-initialized 1x1 convs so each ControlNet tap starts as a no-op.
        UpperCAmelCase_ : Union[str, Any] = nn.Conv(
            __UpperCAmelCase , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(__UpperCAmelCase )
        for i, down_block_type in enumerate(self.down_block_types ):
            UpperCAmelCase_ : Union[str, Any] = output_channel
            UpperCAmelCase_ : Optional[Any] = block_out_channels[i]
            UpperCAmelCase_ : int = i == len(__UpperCAmelCase ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                UpperCAmelCase_ : Optional[Any] = FlaxCrossAttnDownBlockaD(
                    in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                UpperCAmelCase_ : List[Any] = FlaxDownBlockaD(
                    in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(__UpperCAmelCase )
            for _ in range(self.layers_per_block ):
                UpperCAmelCase_ : int = nn.Conv(
                    __UpperCAmelCase , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(__UpperCAmelCase )
            if not is_final_block:
                UpperCAmelCase_ : List[str] = nn.Conv(
                    __UpperCAmelCase , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(__UpperCAmelCase )
        UpperCAmelCase_ : str = down_blocks
        UpperCAmelCase_ : List[str] = controlnet_down_blocks

        # mid
        UpperCAmelCase_ : Dict = block_out_channels[-1]
        UpperCAmelCase_ : Union[str, Any] = FlaxUNetMidBlockaDCrossAttn(
            in_channels=__UpperCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        UpperCAmelCase_ : Tuple = nn.Conv(
            __UpperCAmelCase , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1.0 , _UpperCamelCase = True , _UpperCamelCase = False , ) -> Union[FlaxControlNetOutput, Tuple]:
        # Forward pass: returns the scaled down-block residuals and mid-block
        # residual that a paired UNet adds into its own activations.
        UpperCAmelCase_ : List[str] = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            UpperCAmelCase_ : List[str] = jnp.flip(__UpperCAmelCase , axis=1 )

        # 1. time
        if not isinstance(__UpperCAmelCase , jnp.ndarray ):
            UpperCAmelCase_ : Optional[int] = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(__UpperCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
            UpperCAmelCase_ : str = timesteps.astype(dtype=jnp.floataa )
            UpperCAmelCase_ : int = jnp.expand_dims(__UpperCAmelCase , 0 )
        UpperCAmelCase_ : Tuple = self.time_proj(__UpperCAmelCase )
        UpperCAmelCase_ : int = self.time_embedding(__UpperCAmelCase )

        # 2. pre-process (NCHW -> NHWC for flax convs)
        UpperCAmelCase_ : Dict = jnp.transpose(__UpperCAmelCase , (0, 2, 3, 1) )
        UpperCAmelCase_ : str = self.conv_in(__UpperCAmelCase )
        UpperCAmelCase_ : Dict = jnp.transpose(__UpperCAmelCase , (0, 2, 3, 1) )
        UpperCAmelCase_ : Tuple = self.controlnet_cond_embedding(__UpperCAmelCase )
        sample += controlnet_cond

        # 3. down
        UpperCAmelCase_ : Any = (sample,)
        for down_block in self.down_blocks:
            if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = down_block(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , deterministic=not train )
            else:
                UpperCAmelCase_ , UpperCAmelCase_ : int = down_block(__UpperCAmelCase , __UpperCAmelCase , deterministic=not train )
            down_block_res_samples += res_samples

        # 4. mid
        UpperCAmelCase_ : List[str] = self.mid_block(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , deterministic=not train )

        # 5. contronet blocks
        UpperCAmelCase_ : Optional[int] = ()
        for down_block_res_sample, controlnet_block in zip(__UpperCAmelCase , self.controlnet_down_blocks ):
            UpperCAmelCase_ : List[Any] = controlnet_block(__UpperCAmelCase )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        UpperCAmelCase_ : Dict = controlnet_down_block_res_samples
        UpperCAmelCase_ : Optional[Any] = self.controlnet_mid_block(__UpperCAmelCase )

        # 6. scaling
        UpperCAmelCase_ : Union[str, Any] = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=__UpperCAmelCase , mid_block_res_sample=__UpperCAmelCase )
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure (Transformers convention): heavy torch modules are
# only imported for static type checking or on first attribute access.
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Add the modeling entries under their own key; the previous version
    # rebound the whole dict here, losing the configuration entries.
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Name matches the exported "NllbMoeTop2Router" entry above.
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 | 0 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowercase(class_prompt, class_data_dir, num_class_images):
    """Download up to ``num_class_images`` images matching ``class_prompt``
    via the LAION knn-retrieval service into ``class_data_dir``.

    NOTE(review): a second module-level ``lowercase`` (the CLI parser) shadows
    this definition; the two should get distinct names together with the
    ``__main__`` block below.
    """
    # Over-query by 50% so enough results survive the download failures below.
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 )

    os.makedirs(f'{class_data_dir}/images' , exist_ok=True )
    if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
        return

    # Grow the request size until the service returns enough candidates
    # (capped at 10k requested images).
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 , )

    count = 0
    total = 0
    pbar = tqdm(desc='''downloading real regularization images''' , total=num_class_images )
    with open(f'{class_data_dir}/caption.txt' , '''w''' ) as fa, open(f'{class_data_dir}/urls.txt' , '''w''' ) as fa_urls, open(
        f'{class_data_dir}/images.txt' , '''w''' ) as fa_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['''url'''] )
                if img.status_code == 200:
                    # Validate that the payload decodes as an image before keeping it.
                    Image.open(BytesIO(img.content ) )
                    with open(f'{class_data_dir}/images/{total}.jpg' , '''wb''' ) as f:
                        f.write(img.content )
                    fa.write(images['''caption'''] + '''\n''' )
                    fa_urls.write(images['''url'''] + '''\n''' )
                    fa_paths.write(f'{class_data_dir}/images/{total}.jpg' + '''\n''' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                # Best effort: skip unreachable / undecodable images.
                continue
    return
def lowercase():
    """Build the CLI parser for the retrieval script and parse ``sys.argv``.

    NOTE(review): this definition shadows the retrieval function above, which
    is also named ``lowercase``; rename the pair together with the
    ``__main__`` block below.
    """
    parser = argparse.ArgumentParser('''''' , add_help=False )
    parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=True , type=str )
    parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=True , type=str )
    parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    # NOTE(review): broken wiring — the parse result is bound to ``__A`` but
    # read back as ``args``, and ``parse_args`` / ``retrieve`` are not defined
    # under those names (both helpers above are called ``lowercase``).
    # Fixing this requires renaming the two helpers in a coordinated change.
    __A : str = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 260 |
def _a ( a :int ) -> list:
    """Intended: return the ``a``-bit Gray code as a list of ints.

    NOTE(review): dead/broken code — the second ``def _a`` below shadows this
    one at import time; the body reads ``bit_count`` and ``sequence`` which
    are never bound (the parameter is ``a``); and
    ``gray_code_sequence_string`` is undefined in this module. Needs a
    coordinated rename with the string-sequence helper below.
    """
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be positive''' )
    # get the generated string sequence
    a = gray_code_sequence_string(a )
    #
    # convert them to integers
    for i in range(len(a ) ):
        a = int(sequence[i] , 2 )
    return sequence
def _a ( a :int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
a = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
a = gray_code_sequence_string(bit_count - 1 )
a = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
a = '''0''' + smaller_sequence[i]
sequence.append(a )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
a = '''1''' + smaller_sequence[i]
sequence.append(a )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan

# Ellipsoid constants in metres (values match the common WGS-84 figures:
# equatorial axis, polar axis, equatorial radius — confirm against callers).
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314245
RADIUS = 6_378_137


def UpperCamelCase(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Haversine distance in metres between two (lat, lon) points in degrees.

    Latitudes are first corrected for the Earth's flattening (geodetic ->
    reduced latitude) before applying the spherical haversine formula.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)

    # Haversine terms. The previous obfuscated body computed sin((x - x)/2),
    # i.e. always zero — the two endpoints must be distinct variables.
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda

    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 305 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase_ ( unittest.TestCase ):
    """Tests for ``VisionTextDualEncoderProcessor`` wrapping a BERT tokenizer
    and a ViT image processor.

    Fixture/helper names (``setUp``/``tearDown``/``get_*``/``prepare_image_inputs``)
    are restored from their call sites and the unittest protocol; the previous
    obfuscated names broke fixture discovery and every ``self.get_*`` call.
    """

    def setUp(self):
        # Write a minimal vocab and image-processor config into a temp dir so
        # both components can be loaded via ``from_pretrained``.
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )

    def tearDown(self):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (converted to channels-last)."""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with self.assertRaises(ValueError ):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 0 | 0 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve over 2-D control points; degree = len(points) - 1.

    Renamed from an obfuscated identifier so the ``__main__`` demo below,
    which instantiates ``BezierCurve``, resolves; method names are restored
    from their call sites (``self.basis_function`` / ``self.bezier_curve_function``
    / ``.plot_curve()``).
    """

    def __init__(self, list_of_points: list[tuple[float, float]]) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Return the Bernstein basis values at parameter ``t`` (0 <= t <= 1)."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Return the (x, y) point on the curve at parameter ``t``."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plot the curve and its control points with matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color='blue', label='Curve of Degree ' + str(self.degree), )
        plt.scatter(x, y, color='red', label='Control Points')
        plt.legend()
        plt.show()


# Backward-compat alias for the previous (obfuscated) class name.
SCREAMING_SNAKE_CASE__ = BezierCurve
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # NOTE(review): the class above is defined as ``SCREAMING_SNAKE_CASE__``
    # with mangled method names, so ``BezierCurve(...).plot_curve()`` fails
    # unless the class definition is restored.
    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 32 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def _a ( a :List[Any] ) -> Optional[int]:
a = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    """Return (HF name, original name) rename pairs for transformer block
    ``cnt`` of stage ``idx``: the q/k/v convolutional projections (conv +
    batchnorm statistics), the linear q/k/v projections, the attention output
    projection, the MLP, and both layer norms — 34 entries, in the order the
    original conversion script emitted them."""
    attention_weights = []
    hf_block = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_block = f"stage{idx}.blocks.{cnt}"

    # 1) Convolutional projections for query/key/value:
    #    one conv weight followed by the five batchnorm parameters/buffers.
    for hf_name, orig_short in (("query", "q"), ("key", "k"), ("value", "v")):
        hf_prefix = (
            f"{hf_block}.attention.attention.convolution_projection_{hf_name}"
            ".convolution_projection"
        )
        orig_prefix = f"{orig_block}.attn.conv_proj_{orig_short}"
        attention_weights.append(
            (f"{hf_prefix}.convolution.weight", f"{orig_prefix}.conv.weight")
        )
        for stat in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append(
                (f"{hf_prefix}.normalization.{stat}", f"{orig_prefix}.bn.{stat}")
            )

    # 2) Linear projections for query/key/value.
    for hf_name, orig_short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_block}.attention.attention.projection_{hf_name}.{param}",
                    f"{orig_block}.attn.proj_{orig_short}.{param}",
                )
            )

    # 3) Attention output projection.
    for param in ("weight", "bias"):
        attention_weights.append(
            (f"{hf_block}.attention.output.dense.{param}", f"{orig_block}.attn.proj.{param}")
        )

    # 4) MLP (fc1/fc2) and the two layer norms.
    for hf_name, orig_name in (
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append(
                (f"{hf_block}.{hf_name}.{param}", f"{orig_block}.{orig_name}.{param}")
            )
    return attention_weights
def cls_token(idx):
    """Return the rename pair for stage ``idx``'s class token.

    The original checkpoint stores the cls token only under ``stage2`` — the
    right-hand side is intentionally fixed, not parameterized by ``idx``.
    """
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
def final():
    """Return rename pairs for the final layernorm and the classifier head."""
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert an original CvT checkpoint into a HF ``CvtForImageClassification``.

    Args:
        cvt_model: model name such as ``"cvt-w24"``; the depth variant is read
            from characters 4:6 of the basename ("13", "21", else wide w24).
        image_size: input resolution to record on the image processor.
        cvt_file_name: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder: output directory for the converted model and
            image processor.
    """
    # Build the ImageNet-1k label maps for the classification head.
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(
        open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r")
    )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # NOTE(review): the original statement that consumed ``image_size`` was
    # lost in this copy; setting the processor's shortest edge matches the
    # upstream conversion script — confirm against transformers source.
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    # Assemble the full (new_name, old_name) rename list for this architecture.
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        # Only stages configured with a class token get the cls_token entry.
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()

    for gg in list_of_state_dict:
        print(gg)
    # Copy each original tensor under its new HF parameter name.
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # NOTE(review): the parser/args are bound to ``UpperCAmelCase__`` but read
    # back as ``parser``/``args`` below — the names should match; looks like a
    # transcription artifact. TODO confirm against the original script.
    UpperCAmelCase__ = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    UpperCAmelCase__ = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 0 | 0 |
'''simple docstring'''
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """Estimate the largest-magnitude eigenvalue/eigenvector via power iteration.

    Args:
        input_matrix: square matrix; must be Hermitian if complex.
        vector: initial guess (same leading dimension as ``input_matrix``).
        error_tol: relative change in the eigenvalue at which to stop.
        max_iterations: hard cap on iterations.

    Returns:
        Tuple of (largest eigenvalue, corresponding unit eigenvector).
    """
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed
    # max_iterations or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration() -> None:
    """Self-test: compare ``power_iteration`` against numpy's ``eigh`` on a
    real symmetric matrix and a complex Hermitian matrix."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    # Build a Hermitian matrix: symmetric real part plus anti-symmetric
    # imaginary part.
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Run the numpy cross-check as a smoke test.
    test_power_iteration()
| 4 |
from __future__ import annotations
# A sudoku grid is a 9x9 list of ints; 0 marks an empty cell.
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if ``n`` can be placed at (row, column) without clashing
    with the same row, column, or 3x3 box."""
    # Row and column check in a single pass.
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    # 3x3 box check: top-left corner of the box is (row - row % 3, column - column % 3).
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of the first empty cell (value 0) scanning
    row-major, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking.

    Returns the (mutated) solved grid, or None when no solution exists.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Undo the tentative placement before trying the next digit.
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    """Print the grid one row per line, cells separated by spaces."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        # NOTE(review): the result is bound to ``UpperCAmelCase__`` but read
        # back as ``solution`` below — the two names should match; looks like
        # a transcription artifact. TODO confirm.
        UpperCAmelCase__ = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 0 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
# Lazy import structure for the Speech2Text model:
# maps submodule name -> list of public symbols it provides.
_A : int ={
    '''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
    '''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}

# The tokenizer requires sentencepiece; register it only when available.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this (and the branches below) rebinds a fresh variable
    # instead of adding a key to the import-structure dict above — looks like
    # a transcription artifact of the upstream __init__; confirm against the
    # transformers source before relying on lazy imports here.
    _A : List[Any] =['''Speech2TextTokenizer''']

# Feature extraction requires the speech extras.
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Optional[int] =['''Speech2TextFeatureExtractor''']

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : str =[
        '''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFSpeech2TextForConditionalGeneration''',
        '''TFSpeech2TextModel''',
        '''TFSpeech2TextPreTrainedModel''',
    ]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Tuple =[
        '''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Speech2TextForConditionalGeneration''',
        '''Speech2TextModel''',
        '''Speech2TextPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )

else:
    import sys

    # At runtime the module is replaced by a lazy proxy that imports on access.
    # NOTE(review): ``_import_structure`` is not defined above in this copy —
    # presumably it should be the dict assigned at the top. TODO confirm.
    _A : Any =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
# Make torch ops deterministic so the hard-coded slice comparisons below are
# reproducible across runs.
enable_full_determinism()
class lowercase_(PipelineTesterMixin, unittest.TestCase):
    """Fast tests for the Kandinsky 2.2 prior pipeline, built from tiny dummy
    CLIP/prior components.

    NOTE(review): the base class and the attribute/method names below were
    reconstructed from the internal references (``self.pipeline_class``,
    ``self.text_embedder_hidden_size``, ...) and the names PipelineTesterMixin
    expects — the original identifiers were mangled in this copy; confirm
    against the diffusers test suite.
    """

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 12,
            'embedding_dim': self.text_embedder_hidden_size,
            'num_layers': 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents
        # will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        """Assemble the full set of tiny components the pipeline needs."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log',
            prediction_type='sample',
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'scheduler': scheduler,
            'image_processor': image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on ``device``."""
        if str(device).startswith('mps'):
            # mps generators do not accept a device argument.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs

    def test_kandinsky_prior(self):
        """Smoke test: run the prior pipeline and compare a slice of the
        resulting image embeddings against hard-coded expected values."""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 0 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy import structure for TrOCR: submodule name -> public symbols.
__UpperCAmelCase = {
    'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
    'processing_trocr': ['TrOCRProcessor'],
}

# Model classes need torch; register them only when it is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this rebinds the variable instead of adding a
    # "modeling_trocr" key to the dict above — looks like a transcription
    # artifact of the upstream __init__; confirm against transformers source.
    __UpperCAmelCase = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # At runtime the module is replaced by a lazy proxy that imports on access.
    # NOTE(review): ``_import_structure`` is not defined above in this copy —
    # presumably the dict assigned at the top. TODO confirm.
    __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 |
from math import factorial
# Factorial of every decimal digit, keyed by the digit's character; used by
# digit_factorial_sum below.
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of ``number``.

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` is negative.
    """
    if not isinstance(number, int):
        raise TypeError('Parameter number must be int')

    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0')

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """Project Euler 74: count starting numbers below ``number_limit`` whose
    digit-factorial chain contains exactly ``chain_length`` non-repeating
    terms.

    Raises:
        TypeError: if either parameter is not an int.
        ValueError: if either parameter is not positive.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('Parameters chain_length and number_limit must be int')

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0')

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains, keyed by starting element
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating
        # item or the length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        # Extend with the cached tail length when we hit a known element.
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Full Project Euler 74 answer with the default parameters.
    print(f"""{solution()}""")
| 0 | 0 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape amazon.in search results for ``product`` into a DataFrame.

    NOTE(review): performs a live HTTP request; the parsed fields depend on
    Amazon's HTML structure staying stable.

    Args:
        product: search term appended to the amazon.in search URL.

    Returns:
        DataFrame with title, link, price, rating, MRP and discount columns.
    """
    url = f'https://www.amazon.in/laptop/s?k={product}'
    header = {
        """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        """Accept-Language""": """en-US, en;q=0.5""",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            """Product Title""",
            """Product Link""",
            """Current Price of the product""",
            """Product Rating""",
            """MRP of the product""",
            """Discount""",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            """div""",
            attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""},
        ),
        soup.find_all("""div""", attrs={"""class""": """a-row a-size-base a-color-base"""}),
    ):
        try:
            product_title = item.h2.text
            product_link = """https://www.amazon.in/""" + item.h2.a["""href"""]
            product_price = item.find("""span""", attrs={"""class""": """a-offscreen"""}).text
            try:
                product_rating = item.find("""span""", attrs={"""class""": """a-icon-alt"""}).text
            except AttributeError:
                product_rating = """Not available"""
            try:
                product_mrp = (
                    """₹"""
                    + item.find(
                        """span""", attrs={"""class""": """a-price a-text-price"""}).text.split("""₹""")[1]
                )
            except AttributeError:
                product_mrp = """"""
            try:
                # Discount as a percentage of MRP; ValueError when either
                # price string is empty/non-numeric.
                discount = float(
                    (
                        (
                            float(product_mrp.strip("""₹""").replace(""",""", """"""))
                            - float(product_price.strip("""₹""").replace(""",""", """"""))
                        )
                        / float(product_mrp.strip("""₹""").replace(""",""", """"""))
                    )
                    * 1_0_0)
            except ValueError:
                discount = float("""nan""")
        except AttributeError:
            # Skip malformed entries entirely.
            pass
        # Append the scraped row to the dataframe.
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # NOTE(review): the original script blanked out some cells with " " here,
    # but the two assignment targets were lost in this copy; masking missing
    # MRP/Discount values is a best-effort reconstruction — TODO confirm
    # against the upstream script.
    data_frame.loc[data_frame["""MRP of the product"""] == """""", """MRP of the product"""] = """ """
    data_frame.loc[data_frame["""MRP of the product"""] == """ """, """Discount"""] = """ """
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
__A ='headphones'
get_amazon_product_data(product).to_csv(f"""Amazon Product Data for {product}.csv""") | 163 |
def solution(n: int = 100) -> int:
    """Project Euler 6: return the difference between the square of the sum
    and the sum of the squares of the first ``n`` natural numbers.

    Uses the closed-form formulas with integer arithmetic (both numerators are
    always divisible, so // is exact).
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
    # Print the answer for the default n=100.
    print(f"""{solution() = }""")
| 0 | 0 |
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test.

    Deterministic for n below 3,317,044,064,679,887,385,961,981 using the
    published witness bounds; above that, raises unless ``allow_probable`` is
    set, in which case True means "probable prime".
    """
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3317044064679887385961981 and not allow_probable:
        raise ValueError(
            'Warning: upper bound of deterministic test is exceeded. '
            'Pass allow_probable=True to allow probabilistic test. '
            'A return value of True indicates a probable prime.' )
    # array bounds provided by analysis
    bounds = [
        2047,
        1373653,
        25326001,
        3215031751,
        2152302898747,
        3474749660383,
        341550071728321,
        1,
        3825123056546413051,
        1,
        1,
        318665857834031151167461,
        3317044064679887385961981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin() -> None:
    """Self-test: each pair is (composite, prime) straddling one of the
    deterministic witness bounds used by miller_rabin."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838201)
    assert miller_rabin(838207)
    # 1_373_653
    assert not miller_rabin(17316001)
    assert miller_rabin(17316017)
    # 25_326_001
    assert not miller_rabin(3078386641)
    assert miller_rabin(3078386653)
    # 3_215_031_751
    assert not miller_rabin(1713045574801)
    assert miller_rabin(1713045574819)
    # 2_152_302_898_747
    assert not miller_rabin(2779799728307)
    assert miller_rabin(2779799728327)
    # 3_474_749_660_383
    assert not miller_rabin(113850023909441)
    assert miller_rabin(113850023909527)
    # 341_550_071_728_321
    assert not miller_rabin(1275041018848804351)
    assert miller_rabin(1275041018848804391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79666464458507787791867)
    assert miller_rabin(79666464458507787791951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552840677446647897660333)
    assert miller_rabin(552840677446647897660359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test
if __name__ == "__main__":
    # Run the witness-bound self-test.
    test_miller_rabin()
| 205 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy import structure for GroupViT: submodule name -> public symbols.
UpperCAmelCase__ = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

# PyTorch model classes, only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this (and the TF branch below) rebinds the variable
    # instead of adding a "modeling_groupvit" key to the dict above — looks
    # like a transcription artifact of the upstream __init__; confirm against
    # transformers source.
    UpperCAmelCase__ = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

# TensorFlow model classes, only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # At runtime the module is replaced by a lazy proxy that imports on access.
    # NOTE(review): ``_import_structure`` is not defined above in this copy —
    # presumably the dict assigned at the top. TODO confirm.
    UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build an :class:`UperNetConfig` with a Swin backbone for ``model_name``.

    Fixes: every local in the original shared one mangled name, so each
    assignment clobbered the previous one and both configs received the same
    (last-assigned) value for every keyword; the ``idalabel``/``labelaid``
    keywords were mangled forms of ``id2label``/``label2id``.

    Args:
        model_name: one of ``upernet-swin-{tiny,small,base,large}``.

    Returns:
        A fully-populated ``UperNetConfig`` with ADE20k label mappings.
    """
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information (ADE20k: 150 semantic classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


lowerCAmelCase_ = get_upernet_config  # backward-compat alias for the obfuscated name
def create_rename_keys(config):
    """Return (old, new) key pairs mapping mmsegmentation Swin+UperNet state-dict
    names to the HuggingFace naming scheme.

    Fixes: the original appended to ``rename_keys`` while the list had been
    bound to a mangled throwaway name, raising ``NameError`` on first use.

    Args:
        config: an UperNet config whose ``backbone_config.depths`` gives the
            number of blocks per Swin stage.

    Returns:
        list[tuple[str, str]]: source/destination key pairs.
    """
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


lowerCAmelCase_ = create_rename_keys  # backward-compat alias for the obfuscated name
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Fixes: the original popped the value but bound it (and then the new value)
    to the same mangled local, so the entry was deleted and never re-inserted.
    """
    val = dct.pop(old)
    dct[new] = val


lowerCAmelCase_ = rename_key  # backward-compat alias for the obfuscated name
def read_in_q_k_v(state_dict, backbone_config):
    """Split each fused Swin ``qkv`` projection in ``state_dict`` into separate
    query/key/value weights and biases, in place.

    Fixes: the original bound every slice to the same mangled local instead of
    writing it back into ``state_dict``, and referenced undefined names
    (``in_proj_weight``/``in_proj_bias``/``dim``).  Target key names follow the
    ``attention.self.*`` scheme used by ``create_rename_keys`` above.
    """
    # Hidden size doubles at every Swin stage.
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


lowerCAmelCase_ = read_in_q_k_v  # backward-compat alias for the obfuscated name
def correct_unfold_reduction_order(x):
    """Reorder a 2-D patch-merging ``reduction`` weight from mmseg's unfold
    channel order ([0, 2, 1, 3] groups of 4) to the interleaved order.

    Fixes: the original unpacked ``x.shape`` into one mangled name twice and
    then referenced the undefined names ``in_channel``/``out_channel``.
    """
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


lowerCAmelCase_ = correct_unfold_reduction_order  # backward-compat alias for the obfuscated name
def reverse_correct_unfold_reduction_order(x):
    """Inverse of ``correct_unfold_reduction_order`` for a 2-D ``reduction``
    weight (groups of 4 along the input channel axis).

    Fixes: the original unpacked ``x.shape`` into one mangled name twice and
    then referenced the undefined names ``in_channel``/``out_channel``.
    """
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


lowerCAmelCase_ = reverse_correct_unfold_reduction_order  # backward-compat alias for the obfuscated name
def correct_unfold_norm_order(x):
    """Reorder a 1-D patch-merging ``norm`` parameter from mmseg's unfold
    channel order to the interleaved order.

    Fixes: the original bound ``x.shape[0]`` to a mangled local and then
    referenced the undefined name ``in_channel``.
    """
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


lowerCAmelCase_ = correct_unfold_norm_order  # backward-compat alias for the obfuscated name
def reverse_correct_unfold_norm_order(x):
    """Inverse of ``correct_unfold_norm_order`` for a 1-D ``norm`` parameter.

    Fixes: the original bound ``x.shape[0]`` to a mangled local and then
    referenced the undefined name ``in_channel``.
    """
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x


lowerCAmelCase_ = reverse_correct_unfold_norm_order  # backward-compat alias for the obfuscated name
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation Swin+UperNet checkpoint to the HF format.

    Downloads the original checkpoint, remaps its state dict onto
    ``UperNetForSemanticSegmentation``, verifies logits on a sample ADE20k
    image, and optionally saves the model/processor and pushes them to the hub.

    Fixes: every local in the original was bound to one mangled name (so
    config/model/state_dict were clobbered); the "bn" rename loop popped every
    key and never reinserted it, emptying the state dict; ``stream=`` and
    ``file_name=`` received mangled placeholders instead of ``True`` /
    ``model_name``.
    """
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters: mmseg stores them in "unfold" channel order
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


lowerCAmelCase_ = convert_upernet_checkpoint  # backward-compat alias for the obfuscated name
if __name__ == "__main__":
    # Command-line entry point: convert a Swin+UperNet mmsegmentation
    # checkpoint, optionally saving and/or pushing the result.
    # Fixes: the parser and parsed args were bound to a mangled name while the
    # code below referenced the undefined names `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 348 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowercase_ ( lowercase , unittest.TestCase ):
    '''Fast unit tests for ShapEPipeline built from tiny, seeded dummy sub-models.

    NOTE(review): identifiers in this file appear machine-mangled — every class
    attribute is bound to the same name (`__snake_case`) and every method to
    `__lowerCAmelCase`, so later bindings clobber earlier ones, and method
    bodies read names that are never assigned (`tokenizer`, `model`,
    `__UpperCAmelCase`, ...).  The attributes were presumably `pipeline_class`,
    `params`, `batch_params`, `required_optional_params`, etc., and the methods
    the usual property/test names — confirm against the upstream diffusers
    test file before relying on this class.
    '''

    __snake_case = ShapEPipeline
    __snake_case = ['''prompt''']
    __snake_case = ['''prompt''']
    __snake_case = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    __snake_case = False

    @property
    def __lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
        """Hidden size of the dummy CLIP text embedder."""
        return 32

    @property
    def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
        """Dimensionality of the prior's timestep input embedding."""
        return 32

    @property
    def __lowerCAmelCase ( self : Any ) ->Tuple:
        """Dimensionality of the prior's time embedding (4x the input dim)."""
        return self.time_input_dim * 4

    @property
    def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
        """Hidden size of the dummy NeRF renderer."""
        return 8

    @property
    def __lowerCAmelCase ( self : Tuple ) ->str:
        """Tiny CLIP tokenizer for the dummy pipeline."""
        # NOTE(review): result is bound to `a` but `tokenizer` is returned —
        # mangled; presumably both were named `tokenizer`.
        a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer

    @property
    def __lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
        """Dummy CLIP text encoder with projection head, seeded for determinism."""
        torch.manual_seed(0 )
        a = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(__UpperCAmelCase )

    @property
    def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
        """Dummy PriorTransformer, seeded for determinism."""
        torch.manual_seed(0 )
        a = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 16,
            '''embedding_dim''': self.time_input_dim,
            '''num_embeddings''': 32,
            '''embedding_proj_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''num_layers''': 1,
            '''clip_embed_dim''': self.time_input_dim * 2,
            '''additional_embeddings''': 0,
            '''time_embed_act_fn''': '''gelu''',
            '''norm_in_type''': '''layer''',
            '''encoder_hid_proj_type''': None,
            '''added_emb_type''': None,
        }
        a = PriorTransformer(**__UpperCAmelCase )
        return model

    @property
    def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
        """Dummy ShapERenderer, seeded for determinism."""
        torch.manual_seed(0 )
        a = {
            '''param_shapes''': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            '''d_latent''': self.time_input_dim,
            '''d_hidden''': self.renderer_dim,
            '''n_output''': 12,
            '''background''': (
                0.1,
                0.1,
                0.1,
            ),
        }
        a = ShapERenderer(**__UpperCAmelCase )
        return model

    def __lowerCAmelCase ( self : List[Any] ) ->Any:
        """Assemble all dummy components needed to construct the pipeline."""
        a = self.dummy_prior
        a = self.dummy_text_encoder
        a = self.dummy_tokenizer
        a = self.dummy_renderer
        # Heun scheduler with an "exp" beta schedule, predicting the sample.
        a = HeunDiscreteScheduler(
            beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=__UpperCAmelCase , clip_sample=__UpperCAmelCase , clip_sample_range=1.0 , )
        a = {
            '''prior''': prior,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''renderer''': renderer,
            '''scheduler''': scheduler,
        }
        return components

    def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str=0 ) ->Optional[int]:
        """Build deterministic call kwargs for the pipeline on a device/seed."""
        if str(__UpperCAmelCase ).startswith('''mps''' ):
            # MPS does not support device-bound generators.
            a = torch.manual_seed(__UpperCAmelCase )
        else:
            a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        a = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 32,
            '''output_type''': '''np''',
        }
        return inputs

    def __lowerCAmelCase ( self : Dict ) ->Optional[int]:
        """Smoke-test a single CPU inference and check a corner of the output."""
        a = '''cpu'''
        a = self.get_dummy_components()
        a = self.pipeline_class(**__UpperCAmelCase )
        a = pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
        a = output.images[0]
        a = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        a = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __lowerCAmelCase ( self : Dict ) ->Optional[Any]:
        """Check outputs are consistent across batch sizes."""
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
        """Check a batch of identical prompts yields identical outputs."""
        a = torch_device == '''cpu'''
        a = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , )

    def __lowerCAmelCase ( self : str ) ->Optional[int]:
        """Check num_images_per_prompt multiplies the number of outputs."""
        a = self.get_dummy_components()
        a = self.pipeline_class(**__UpperCAmelCase )
        a = pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        a = 1
        a = 2
        a = self.get_dummy_inputs(__UpperCAmelCase )
        for key in inputs.keys():
            if key in self.batch_params:
                a = batch_size * [inputs[key]]
        a = pipe(**__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
    '''Slow GPU integration test for the full openai/shap-e pipeline.

    NOTE(review): identifiers appear machine-mangled — both methods are named
    `__lowerCAmelCase` (the first one is presumably `tearDown`), locals are
    all bound to `a`, and `__UpperCAmelCase` is read but never defined
    (presumably `torch_device` / the intermediate results).  Confirm against
    the upstream diffusers test file.
    '''

    def __lowerCAmelCase ( self : int ) ->Any:
        """Free GPU memory after each test (presumably meant to be tearDown)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
        """Generate 'a shark' and compare against the stored reference frames."""
        a = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_np_out.npy''' )
        a = ShapEPipeline.from_pretrained('''openai/shap-e''' )
        a = pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
        a = pipe(
            '''a shark''' , generator=__UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 0 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __lowerCamelCase ( __snake_case : Any, __snake_case : Optional[int]=0.9_99, __snake_case : Optional[int]="cosine", ) -> Union[str, Any]:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__snake_case : int ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__snake_case : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
A__ : Optional[int] =[]
for i in range(__snake_case ):
A__ : Tuple =i / num_diffusion_timesteps
A__ : Dict =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ), __snake_case ) )
return torch.tensor(__snake_case, dtype=torch.floataa )
class lowerCamelCase ( lowercase_ , lowercase_ ):
    '''Second-order K-DPM2-style discrete scheduler (cf. DPM-Solver-2 / Karras
    et al. k-diffusion): alternates a first-order Euler-like step with a
    second-order correction using log-interpolated sigmas.

    NOTE(review): identifiers in this file look machine-mangled.  The base
    classes `lowercase_` are not defined here (presumably `SchedulerMixin` and
    `ConfigMixin`); several signatures repeat the same parameter name
    (`lowerCAmelCase_`), which is not valid Python; and many statements bind
    results to the throwaway local `A__` where the original almost certainly
    assigned instance attributes (`self.betas`, `self.sigmas`,
    `self.timesteps`, `self.sample`, ...), so those values are currently
    discarded.  Confirm against the upstream diffusers
    `scheduling_k_dpm_2_discrete.py` before relying on any behavior noted
    below.
    '''

    # Presumably `_compatibles` and `order` in the upstream scheduler — both
    # are bound to the same mangled name here, so only the `2` survives.
    __snake_case = [e.name for e in KarrasDiffusionSchedulers]
    __snake_case = 2

    @register_to_config
    def __init__( self : Tuple , lowerCAmelCase_ : int = 10_00 , lowerCAmelCase_ : float = 0.00085 , lowerCAmelCase_ : float = 0.012 , lowerCAmelCase_ : str = "linear" , lowerCAmelCase_ : Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase_ : str = "epsilon" , lowerCAmelCase_ : str = "linspace" , lowerCAmelCase_ : int = 0 , ) -> str:
        '''Build the beta schedule and derived alpha products, then set timesteps.'''
        if trained_betas is not None:
            A__ : Optional[Any] =torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
        elif beta_schedule == "linear":
            A__ : int =torch.linspace(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            A__ : int =(
                torch.linspace(beta_start**0.5 , beta_end**0.5 , __UpperCAmelCase , dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            A__ : Optional[int] =betas_for_alpha_bar(__UpperCAmelCase )
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )

        # NOTE(review): the betas above were bound to a local, yet `self.betas`
        # is read here — presumably both were `self.betas` upstream.
        A__ : Dict =1.0 - self.betas
        A__ : Dict =torch.cumprod(self.alphas , dim=0 )

        # set all values
        self.set_timesteps(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

    def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple=None ) -> Union[str, Any]:
        '''Map a timestep value to its index in the timestep schedule.'''
        if schedule_timesteps is None:
            A__ : Union[str, Any] =self.timesteps
        A__ : Optional[Any] =(schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            A__ : List[Any] =1 if len(__UpperCAmelCase ) > 1 else 0
        else:
            A__ : Dict =timestep.cpu().item() if torch.is_tensor(__UpperCAmelCase ) else timestep
            A__ : List[str] =self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def lowercase__ ( self : List[Any] ) -> Tuple:
        '''Standard deviation of the initial noise distribution.'''
        # max sigma for linspace/trailing spacing, else the Karras-style norm
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
        '''Scale the model input by 1 / sqrt(sigma^2 + 1) for the current step.'''
        A__ : Union[str, Any] =self.index_for_timestep(__UpperCAmelCase )

        # first-order steps use the raw sigmas; second-order the interpolated ones
        if self.state_in_first_order:
            A__ : int =self.sigmas[step_index]
        else:
            A__ : int =self.sigmas_interpol[step_index]

        A__ : Any =sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, torch.device] = None , lowerCAmelCase_ : Optional[int] = None , ) -> Any:
        '''Precompute the (interleaved) sigma and timestep schedules for inference.'''
        A__ : str =num_inference_steps
        A__ : Dict =num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            A__ : Tuple =np.linspace(0 , num_train_timesteps - 1 , __UpperCAmelCase , dtype=__UpperCAmelCase )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            A__ : Dict =num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            A__ : Tuple =(np.arange(0 , __UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(__UpperCAmelCase )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            A__ : Optional[Any] =num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            A__ : Dict =(np.arange(__UpperCAmelCase , 0 , -step_ratio )).round().copy().astype(__UpperCAmelCase )
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )

        # sigma_t = sqrt((1 - alpha_bar) / alpha_bar)
        A__ : List[str] =np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        A__ : Union[str, Any] =torch.from_numpy(np.log(__UpperCAmelCase ) ).to(__UpperCAmelCase )
        A__ : Dict =np.interp(__UpperCAmelCase , np.arange(0 , len(__UpperCAmelCase ) ) , __UpperCAmelCase )
        A__ : Any =np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
        A__ : Tuple =torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase )

        # interpolate sigmas (geometric midpoint in log space) and interleave
        # them with the raw sigmas for the two-phase (1st/2nd order) stepping
        A__ : Tuple =sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
        A__ : Union[str, Any] =torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        A__ : Optional[Any] =torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )

        if str(__UpperCAmelCase ).startswith("""mps""" ):
            # mps does not support float64
            A__ : str =torch.from_numpy(__UpperCAmelCase ).to(__UpperCAmelCase , dtype=torch.floataa )
        else:
            A__ : Tuple =torch.from_numpy(__UpperCAmelCase ).to(__UpperCAmelCase )

        # interpolate timesteps
        A__ : Tuple =self.sigma_to_t(__UpperCAmelCase ).to(__UpperCAmelCase , dtype=timesteps.dtype )
        A__ : str =torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
        A__ : List[Any] =torch.cat([timesteps[:1], interleaved_timesteps] )

        # NOTE(review): presumably `self.sample = None` upstream — the sentinel
        # that `state_in_first_order` reads is never set here as written.
        A__ : Optional[int] =None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        A__ : List[str] =defaultdict(__UpperCAmelCase )

    def lowercase__ ( self : Dict , lowerCAmelCase_ : Optional[Any] ) -> List[str]:
        '''Invert the sigma schedule: map a sigma value to a fractional timestep.'''
        A__ : Union[str, Any] =sigma.log()

        # get distribution
        A__ : List[Any] =log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        A__ : List[str] =dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        A__ : Union[str, Any] =low_idx + 1

        A__ : Optional[Any] =self.log_sigmas[low_idx]
        A__ : Optional[Any] =self.log_sigmas[high_idx]

        # interpolate sigmas
        A__ : List[Any] =(low - log_sigma) / (low - high)
        A__ : Optional[int] =w.clamp(0 , 1 )

        # transform interpolation to time range
        A__ : Union[str, Any] =(1 - w) * low_idx + w * high_idx
        A__ : int =t.view(sigma.shape )
        return t

    @property
    def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
        '''True while the next `step` call should take the first-order phase.'''
        return self.sample is None

    def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Union[torch.FloatTensor, np.ndarray] , lowerCAmelCase_ : Union[float, torch.FloatTensor] , lowerCAmelCase_ : Union[torch.FloatTensor, np.ndarray] , lowerCAmelCase_ : bool = True , ) -> Union[SchedulerOutput, Tuple]:
        '''Propagate the sample one step: first-order Euler phase, then a
        DPM-Solver-2 second-order correction on alternating calls.'''
        A__ : Any =self.index_for_timestep(__UpperCAmelCase )

        # advance index counter by 1
        A__ : Any =timestep.cpu().item() if torch.is_tensor(__UpperCAmelCase ) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            A__ : List[Any] =self.sigmas[step_index]
            A__ : Optional[Any] =self.sigmas_interpol[step_index + 1]
            A__ : Optional[Any] =self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            A__ : int =self.sigmas[step_index - 1]
            A__ : Union[str, Any] =self.sigmas_interpol[step_index]
            A__ : int =self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        A__ : List[Any] =0
        A__ : Dict =sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            A__ : Union[str, Any] =sigma_hat if self.state_in_first_order else sigma_interpol
            A__ : List[str] =sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            A__ : Any =sigma_hat if self.state_in_first_order else sigma_interpol
            A__ : Union[str, Any] =model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("""prediction_type not implemented yet: sample""" )
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            A__ : Tuple =(sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            A__ : Optional[int] =sigma_interpol - sigma_hat

            # store for 2nd order step
            A__ : Optional[Any] =sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            A__ : List[Any] =(sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            A__ : Dict =sigma_next - sigma_hat

            A__ : str =self.sample
            A__ : Tuple =None

        A__ : List[str] =sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=__UpperCAmelCase )

    def lowercase__ ( self : Dict , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor , ) -> torch.FloatTensor:
        '''Perturb clean samples with noise scaled by the per-timestep sigma.'''
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        A__ : List[str] =self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(__UpperCAmelCase ):
            # mps does not support float64
            A__ : List[str] =self.timesteps.to(original_samples.device , dtype=torch.floataa )
            A__ : Optional[int] =timesteps.to(original_samples.device , dtype=torch.floataa )
        else:
            A__ : str =self.timesteps.to(original_samples.device )
            A__ : int =timesteps.to(original_samples.device )

        A__ : Optional[Any] =[self.index_for_timestep(__UpperCAmelCase , __UpperCAmelCase ) for t in timesteps]
        A__ : str =sigmas[step_indices].flatten()
        # broadcast sigma over the trailing sample dimensions
        while len(sigma.shape ) < len(original_samples.shape ):
            A__ : List[str] =sigma.unsqueeze(-1 )

        A__ : Any =original_samples + noise * sigma
        return noisy_samples

    def __len__( self : Dict ) -> Any:
        '''Number of training timesteps configured for this scheduler.'''
        return self.config.num_train_timesteps
| 134 |
from __future__ import annotations
import time
import numpy as np
# Example Banker's-algorithm fixtures: a claim (total-resource) vector, an
# allocated-resources table, and a maximum-claim table (5 processes, 4 resources).
# NOTE(review): all three literals are bound to the SAME mangled name
# (`UpperCAmelCase__`), so the first two are immediately clobbered and only the
# last table survives.  They presumably had distinct names — confirm against
# the code that instantiates the class below.
UpperCAmelCase__ = [8, 5, 9, 7]
UpperCAmelCase__ = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
UpperCAmelCase__ = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class lowercase_ :
    """Banker's algorithm for deadlock avoidance.

    Given the system's total claim vector, the per-process allocation table and
    the per-process maximum-claim table, :meth:`main` simulates execution and
    reports whether the system is in a safe state.

    The transformed class gave every method the same name (so all but the last
    were shadowed) while calls referenced ``self.__need`` etc.; the private
    method names are restored to match those call sites.
    """

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Column-wise sum of the allocation table: units of each resource in use."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Units of each resource still free (claim vector minus in-use sums)."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need (maximum claim minus current allocation)."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need row's index to the row itself, so a satisfied need can
        be traced back to its original process number."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety simulation, printing each process as it executes.

        Any truthy keyword argument triggers a pretty-print of the tables first.
        """
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
                print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Print the allocation/claim tables and the current resource usage."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 0 | 0 |
import argparse
from collections import defaultdict
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(__snake_case , 'r' ) as f:
UpperCAmelCase_ : Optional[int] = f.readlines()
UpperCAmelCase_ : Optional[Any] = F"class {class_name}("
UpperCAmelCase_ : Tuple = F"{4 * ' '}def {test_name}("
UpperCAmelCase_ : Dict = F"{8 * ' '}{correct_line.split()[0]}"
UpperCAmelCase_ : Any = F"{16 * ' '}{correct_line.split()[0]}"
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : str = []
for line in lines:
if line.startswith(__snake_case ):
UpperCAmelCase_ : Tuple = True
elif in_class and line.startswith(__snake_case ):
UpperCAmelCase_ : Dict = True
elif in_class and in_func and (line.startswith(__snake_case ) or line.startswith(__snake_case )):
UpperCAmelCase_ : int = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase_ : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase_ : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
UpperCAmelCase_ : Dict = False
else:
new_lines.append(__snake_case )
with open(__snake_case , 'w' ) as f:
for line in new_lines:
f.write(__snake_case )
def main(correct_filename, fail_filename=None):
    """Apply corrected expected-value lines to their test files.

    ``correct_filename`` holds semicolon-separated records
    ``file;class;test;correct_line``. If ``fail_filename`` is given, only
    tests listed there (as ``file::class::test``) are rewritten.

    Named ``main`` to match the call in the ``__main__`` guard below; the
    transformed file defined it under a different name.
    """
    if fail_filename is not None:
        with open(fail_filename, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()

    # Counts how many matching lines were already replaced per test id.
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--correct_filename', help='filename of tests with expected result')
    parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
| 29 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
# On-disk names of the tokenizer's asset files.
# (The transformed file bound all four tables to the same name, shadowing the
# first three; the class below expects these conventional names.)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Hub URLs of the vocab/tokenizer files for each pretrained ELECTRA checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum model input lengths (positional embedding sizes) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

# Per-checkpoint tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}
class lowercase_ ( PreTrainedTokenizerFast ):
    """Fast ELECTRA tokenizer, backed by the Rust ``tokenizers`` library
    (WordPiece), mirroring the slow :class:`ElectraTokenizer`.

    Restored from the transformed version, which had an undefined base class,
    duplicate parameter names (a SyntaxError), all class attributes/methods
    sharing one name, and lost normalizer-state assignments.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if the saved state disagrees with the
        # requested lowercase/strip-accents/Chinese-chars options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` input ids."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type ids: 0s for the first segment (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model's vocab files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 0 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: pass exactly two known quantities and 0 for the
    unknown one; the missing quantity is returned as a one-entry dict.

    The transformed signature declared the same parameter name three times
    (a SyntaxError); the body's ``voltage``/``current``/``resistance`` names
    are restored.

    Raises:
        ValueError: if not exactly one argument is 0, or resistance < 0.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance < 0:
        raise ValueError('Resistance cannot be negative')
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError('Exactly one argument must be 0')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 260 |
def _a ( a :int ) -> bool:
a = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 0 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. (The transformed file bound the logger and the archive map
# to the same name, so the logger was immediately shadowed.)
logger = logging.get_logger(__name__)

# Hub URLs of the reference configs for pretrained GLPN checkpoints.
GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A ( PretrainedConfig ):
    """Configuration class for GLPN (monocular depth estimation) models.

    Stores the hyper-parameters of the SegFormer-style hierarchical encoder
    and the GLPN depth decoder. Restored from the transformed version, which
    had an undefined base class, duplicate parameter names and a mismatched
    ``**kwargs`` name in the ``super().__init__`` call.
    """

    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 305 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ = pytest.mark.integration
@require_faiss
class lowercase_ ( TestCase ):
    """Integration tests for adding and searching FAISS / Elasticsearch
    indexes on a :class:`Dataset`.

    Restored from the transformed version: undefined base class, tuple
    unpacks collapsed to one name (losing ``scores``/``examples``),
    undefined comprehension and lambda variables, mock ``return_value``
    assignments dropped, and ``np.floataa`` (nonexistent) for ``np.float32``.
    """

    def _create_dummy_dataset(self):
        """Build a 30-row dataset with filenames my_name-train_0 .. my_name-train_29."""
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class lowercase_ ( TestCase ):
    """Unit tests for the FaissIndex wrapper itself.

    Restored from the transformed version: undefined base class, tuple
    unpacks collapsed to one name (losing ``scores``/``indices``), and
    ``assertRaises`` called with an undefined placeholder instead of
    ``ValueError``.
    """

    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        # Supplying both a string factory and a custom index is rejected.
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def _a(mockfs):
    """Round-trip a FaissIndex through an fsspec mock filesystem fixture.

    The transformed version named the fixture parameter ``a`` while the body
    referenced ``mockfs`` (undefined), and lost the ``scores, indices``
    unpack of the search result.
    """
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class lowercase_ ( TestCase ):
    """Unit tests for ElasticSearchIndex with a fully mocked ES client.

    Restored from the transformed version: undefined base class, mock
    ``return_value`` assignments replaced by dead local bindings, and tuple
    unpacks collapsed to one name (losing ``scores``/``indices``).
    """

    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 0 | 0 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class SCREAMING_SNAKE_CASE__ ( AbstractDatasetInputStream ):
    """Dataset input stream that builds a dataset from a Python generator.

    Restored from the transformed version: undefined base class, seven
    parameters all sharing one name (a SyntaxError), ``self.builder`` never
    assigned (the read method reads it), and undefined names passed to
    ``download_and_prepare``/``Generator``.
    """

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Builder that materializes the generator's rows into an Arrow dataset.
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def SCREAMING_SNAKE_CASE(self):
        """Build and return the dataset (streaming or fully materialized)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 32 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
# Module logger. (The transformed file bound the logger and the archive map
# to the same name, so the logger was immediately shadowed.)
logger = logging.get_logger(__name__)

# Hub URLs of the reference configs for the canonical T5 checkpoints.
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class lowercase_ ( PretrainedConfig ):
    """Configuration class for T5 models.

    Restored from the transformed version: undefined base class, seventeen
    parameters all sharing one name (a SyntaxError), and the three class
    attributes sharing one name (only the last survived).
    """

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=512,
        d_kv=64,
        d_ff=2_048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''' )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs
        )
class lowercase_ ( OnnxSeqaSeqConfigWithPast ):
    """ONNX export configuration for T5 (seq2seq, optional past key values).

    Restored from the transformed version: undefined base class, both
    methods sharing one name, and the dynamic-axes dict entries assigned to
    a throwaway variable instead of ``common_inputs``.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis names for each ONNX graph input."""
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # With cached keys/values the attention mask covers past + current tokens
            # and the decoder only receives the newest token.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset supporting the operators T5 needs."""
        return 13
| 0 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case =logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Tuple = ['''pixel_values''']
def __init__( self : str , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : int = 0.9 , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase__ : str , ) -> None:
super().__init__(**__UpperCAmelCase )
lowerCAmelCase = size if size is not None else {'shortest_edge': 2_2_4}
lowerCAmelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowerCAmelCase = get_size_dict(__UpperCAmelCase , param_name='crop_size' )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = crop_pct
lowerCAmelCase = resample
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : Optional[float] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[Any] , ) -> np.ndarray:
lowerCAmelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
if crop_pct is not None:
if "shortest_edge" in size:
lowerCAmelCase = int(size['shortest_edge'] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowerCAmelCase = int(size['height'] / crop_pct )
else:
lowerCAmelCase = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct ))
else:
raise ValueError('Invalid size for resize: {}'.format(__UpperCAmelCase ) )
lowerCAmelCase = get_resize_output_image_size(__UpperCAmelCase , size=__UpperCAmelCase , default_to_square=__UpperCAmelCase )
else:
if "shortest_edge" in size:
lowerCAmelCase = get_resize_output_image_size(__UpperCAmelCase , size=size['shortest_edge'] , default_to_square=__UpperCAmelCase )
elif "height" in size and "width" in size:
lowerCAmelCase = (size['height'], size['width'])
else:
raise ValueError('Invalid size for resize: {}'.format(__UpperCAmelCase ) )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Any , ) -> np.ndarray:
lowerCAmelCase = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__UpperCAmelCase , size=(size['height'], size['width']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Tuple , ) -> Tuple:
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] , ) -> np.ndarray:
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def preprocess(
    self,
    images: ImageInput,
    do_resize: bool = None,
    size: Dict[str, int] = None,
    crop_pct: int = None,
    resample: PILImageResampling = None,
    do_center_crop: bool = None,
    crop_size: Dict[str, int] = None,
    do_rescale: bool = None,
    rescale_factor: float = None,
    do_normalize: bool = None,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    data_format: ChannelDimension = ChannelDimension.FIRST,
    **kwargs,
) -> PIL.Image.Image:
    """
    Run the full preprocessing pipeline (resize -> center crop -> rescale ->
    normalize -> channel-format) over one image or a batch of images and return
    a `BatchFeature` with key `"pixel_values"`.

    Every flag/value falls back to the instance attribute of the same name when
    the caller passes `None`.
    """
    do_resize = do_resize if do_resize is not None else self.do_resize
    crop_pct = crop_pct if crop_pct is not None else self.crop_pct
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std

    size = size if size is not None else self.size
    size = get_size_dict(size, default_to_square=False)
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size, param_name="crop_size")

    images = make_list_of_images(images)

    if not valid_images(images):
        raise ValueError(
            "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
            "torch.Tensor, tf.Tensor or jax.ndarray."
        )

    # BUG FIX: the original condition `do_resize and size is None or resample is None`
    # parsed as `(do_resize and size is None) or resample is None`, raising even
    # when resizing was disabled; parentheses restore the intended check.
    if do_resize and (size is None or resample is None):
        raise ValueError("Size and resample must be specified if do_resize is True.")

    if do_center_crop and crop_pct is None:
        raise ValueError("Crop_pct must be specified if do_center_crop is True.")

    if do_rescale and rescale_factor is None:
        raise ValueError("Rescale factor must be specified if do_rescale is True.")

    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("Image mean and std must be specified if do_normalize is True.")

    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]

    if do_resize:
        images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

    if do_center_crop:
        images = [self.center_crop(image=image, size=crop_size) for image in images]

    if do_rescale:
        images = [self.rescale(image=image, scale=rescale_factor) for image in images]

    if do_normalize:
        images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

    images = [to_channel_dimension_format(image, data_format) for image in images]

    data = {"pixel_values": images}
    return BatchFeature(data=data, tensor_type=return_tensors)
| 4 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {}
class lowercase_(PretrainedConfig):
    """
    Configuration for LLaMA models: stores the model hyper-parameters and
    validates the optional RoPE scaling dictionary.
    """

    # NOTE(review): the two mangled `__snake_case` class attributes clobbered
    # each other; restored to the PretrainedConfig attribute names the rest of
    # the library keys on.
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility: fall back to multi-head attention
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate `self.rope_scaling`: either None or a dict of exactly
        {"type": "linear" | "dynamic", "factor": float > 1}.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            # Message fixed: the code reads the `type` and `factor` keys below,
            # but the original message named the fields `name` and `factor`
            # (and doubled the word "with").
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 0 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for the pickled-pandas dataset builder.

    Renamed from the mangled `__A`: the builder class below references
    `PandasConfig`, and its methods read `self.config.features`.
    """

    # Optional explicit feature schema; when None it is inferred from the data.
    features: Optional[datasets.Features] = None
class __A(datasets.ArrowBasedBuilder):
    """Dataset builder that loads pickled pandas DataFrames into Arrow tables."""

    BUILDER_CONFIG_CLASS = PandasConfig

    # NOTE(review): all four methods below were mangled to the same name
    # (`_lowercase`), so only the last survived; restored to the
    # ArrowBasedBuilder hook names the base class actually calls.
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle str/list/tuple or dict `data_files` and emit one split per key."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
| 1 | '''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    """Return gcd(x, y) via Euclid's algorithm (the body's recursive call
    required this name; the mangled source named all three functions alike)."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Return the least common multiple of x and y."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Project Euler 5: smallest positive number evenly divisible by 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
| 1 | 1 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """Load a TensorFlow ALBERT checkpoint into `AlbertForPreTraining` and save
    the PyTorch state dict to `pytorch_dump_path`.

    (Restored: the mangled version reused one name for all three parameters —
    a SyntaxError — and referenced undefined locals; the main guard below
    already called this function by this name.)
    """
    # Initialise PyTorch model from the JSON architecture description
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 1 | '''simple docstring'''
import os
from math import logaa
def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler 99: return the 1-indexed line of `data_file` (lines are
    "base,exponent" pairs) whose value base**exponent is greatest.

    Compares `exponent * log10(base)` instead of the astronomically large
    powers themselves.
    """
    # Local import: the module-level `from math import logaa` line is mangled
    # and would fail at import time.
    from math import log10

    largest: float = 0
    result = 0
    # `os.path.join` with an absolute `data_file` ignores the dirname prefix,
    # so absolute paths also work.
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
| 1 | 1 |
'''simple docstring'''
def solution(limit: int = 50_000_000) -> int:
    """Project Euler 87: count numbers below `limit` expressible as
    p**2 + q**3 + r**4 with p, q, r prime.
    """
    ret = set()
    # 2**3 + 2**4 = 24 is the smallest cube + fourth-power contribution, which
    # bounds the largest prime whose square can appear.
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # NOTE(review): the early `break`s rely on the set iterating its small
    # dense ints in ascending order (CPython behavior); matches the upstream
    # implementation this was derived from.
    for prime_square in primes:
        square = prime_square * prime_square
        for prime_cube in primes:
            cube = prime_cube * prime_cube * prime_cube
            if square + cube >= limit - 16:
                break
            for prime_tetr in primes:
                tetr = prime_tetr * prime_tetr * prime_tetr * prime_tetr
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
| 1 | '''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Remap an LDM VAE state dict into the diffusers `AutoencoderKL` layout.

    NOTE(review): the mangled source had lost every assignment target,
    including the destination key names; they were reconstructed from the
    upstream diffusers VAE conversion utilities — verify against a reference
    checkpoint before relying on this.
    """
    vae_state_dict = checkpoint

    new_checkpoint = {}
    # Direct one-to-one tensor copies (conv in/out, norms, quant convs).
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        # decoder "up" blocks are stored in reverse order relative to diffusers
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    """Convert a Stable Diffusion VAE `.pt`/`.safetensors` file into a saved
    diffusers `AutoencoderKL` directory at `output_path`.

    (Restored: the mangled version clobbered every local; the main guard
    already called this function by this name.)
    """
    # Only the SD v1 layout is supported; fetch its reference config.
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    # Restored: the mangled source assigned the parser to a throwaway name and
    # then referenced an undefined `parser`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 1 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: str =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Optional[Any] ={
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __A(PretrainedConfig):
    """Configuration for BigBird models (base class restored from the
    `PretrainedConfig` imported above; duplicate parameter names in the
    original `__init__` were a SyntaxError)."""

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class __A(OnnxConfig):
    """ONNX export configuration for BigBird (base class restored from the
    `OnnxConfig` imported above)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Property name restored: the OnnxConfig contract exposes `inputs`,
        # and the body referenced `dynamic_axis` which was never assigned.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 1 | '''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    """Helper that builds small ViT configs/inputs for the Flax model tests.

    Renamed from the mangled `__A`: the test class below instantiates
    `FlaxViTModelTester(self)`. Duplicate parameter and method names in the
    original were restored to the mixin-expected names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class __A(FlaxModelTesterMixin, unittest.TestCase):
    """Flax ViT model tests (mixin base restored from the import above;
    duplicate `_lowercase` method names restored to the unittest/mixin names
    they must carry to run)."""

    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 1 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding: `_import_structure` maps submodules to their public
# names; the module object is replaced by a `_LazyModule` at the bottom.
# Restored: the mangled source assigned to throwaway names while later code
# referenced the undefined `_import_structure`.
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: modeling classes are simply not exported without it.
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 1 | '''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __A(TestCase):
    """Tests for `RealmRetriever` (base class restored from the `TestCase`
    imported above; all ten methods were mangled to the same name and have
    been restored to unittest/helper names so they actually run)."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 1 | 1 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
# Helper that builds tiny CTRL configs and dummy inputs for the model tests
# below (the usual Hugging Face `*ModelTester` pattern).
# NOTE(review): class-body indentation was stripped and all local assignment
# targets collapsed to `UpperCAmelCase_` by obfuscation; the names read
# afterwards (`parent`, `batch_size`, `input_ids`, ...) were distinct locals
# / `self.` attributes in the original code.
class __A :
def __init__(self : str , __a : Optional[Any] , __a : Union[str, Any]=14 , __a : List[str]=7 , __a : Union[str, Any]=True , __a : int=True , __a : List[str]=True , __a : str=True , __a : str=True , __a : List[str]=99 , __a : Union[str, Any]=32 , __a : Optional[Any]=5 , __a : int=4 , __a : List[Any]=37 , __a : Optional[Any]="gelu" , __a : Tuple=0.1 , __a : Any=0.1 , __a : int=512 , __a : int=16 , __a : Optional[int]=2 , __a : Any=0.02 , __a : str=3 , __a : Optional[Any]=4 , __a : Optional[Any]=None , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = use_mc_token_ids
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
UpperCAmelCase_ = self.vocab_size - 1
# prepare_config_and_inputs: builds random ids/masks/labels sized by the
# hyper-parameters above.
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ = None
if self.use_mc_token_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
# get_config: a deliberately tiny CTRLConfig so the tests run fast.
def _lowercase (self : Optional[int] ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
# create_and_check_ctrl_model: forward passes with/without optional inputs,
# then checks output shape and the number of cached key/value layers.
def _lowercase (self : Dict , __a : List[str] , __a : int , __a : Tuple , __a : int , __a : Union[str, Any] , *__a : List[Any] ):
UpperCAmelCase_ = CTRLModel(config=__a )
model.to(__a )
model.eval()
model(__a , token_type_ids=__a , head_mask=__a )
model(__a , token_type_ids=__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
# create_and_check_lm_head_model: checks loss is scalar and logits shape.
def _lowercase (self : List[Any] , __a : Tuple , __a : Any , __a : Tuple , __a : str , __a : Tuple , *__a : str ):
UpperCAmelCase_ = CTRLLMHeadModel(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
# prepare_config_and_inputs_for_common: repackages the tuple above into the
# (config, inputs_dict) shape expected by ModelTesterMixin.
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
# create_and_check_ctrl_for_sequence_classification: checks classifier
# logits shape for num_labels classes.
def _lowercase (self : Tuple , __a : str , __a : Union[str, Any] , __a : List[str] , __a : Optional[Any] , *__a : Optional[int] ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = CTRLForSequenceClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
# Common-API test suite for CTRL models, driven by the tester class above
# together with the shared Model/Generation/Pipeline tester mixins.
class __A ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a__ : Dict = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
a__ : Union[str, Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
a__ : List[Any] = (
{
"""feature-extraction""": CTRLModel,
"""text-classification""": CTRLForSequenceClassification,
"""text-generation""": CTRLLMHeadModel,
"""zero-shot""": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : Any = True
a__ : Union[str, Any] = False
a__ : Dict = False
# Skip pipeline test cases that cannot work for CTRL (no padding token).
def _lowercase (self : List[Any] , __a : int , __a : Optional[int] , __a : Dict , __a : int , __a : List[Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
# setUp: wire up the model tester and the generic config tester.
def _lowercase (self : Any ):
UpperCAmelCase_ = CTRLModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , n_embd=37 )
def _lowercase (self : Union[str, Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : Optional[int] ):
self.config_tester.run_common_tests()
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__a )
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__a )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase (self : Optional[int] ):
pass
# Smoke-test loading the first published checkpoint (slow / network test).
@slow
def _lowercase (self : Union[str, Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = CTRLModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _lowercase (self : List[str] ):
pass
@require_torch
# Integration test: greedy generation from the full pretrained "ctrl"
# checkpoint must reproduce a fixed token sequence.
class __A ( unittest.TestCase ):
def _lowercase (self : Optional[Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = CTRLLMHeadModel.from_pretrained("ctrl" )
model.to(__a )
UpperCAmelCase_ = torch.tensor(
[[11859, 0, 1611, 8]] , dtype=torch.long , device=__a ) # Legal the president is
# Expected greedy continuation, pinned token-by-token.
UpperCAmelCase_ = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
UpperCAmelCase_ = model.generate(__a , do_sample=__a )
self.assertListEqual(output_ids[0].tolist() , __a )
| 1 | '''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
SCREAMING_SNAKE_CASE_: Optional[int] =3_00 # TEMPERATURE (unit = K)
def lowerCAmelCase_ ( donor_conc : float , acceptor_conc : float , intrinsic_conc : float , temperature : float = 300.0 , ) -> float:
    """Return the built-in voltage (in volts) of a p-n junction.

    V_bi = (k * T / q) * ln(N_d * N_a / n_i**2)

    Args:
        donor_conc: donor concentration N_d (must exceed ``intrinsic_conc``).
        acceptor_conc: acceptor concentration N_a (must exceed ``intrinsic_conc``).
        intrinsic_conc: intrinsic carrier concentration n_i (must be > 0).
        temperature: junction temperature in kelvin.  Defaults to 300 K, the
            value of the module constant the original implementation read,
            so existing callers see identical results.

    Raises:
        ValueError: if any concentration is non-positive, or a doping
            concentration does not exceed the intrinsic concentration.

    Fix: the three parameters were all named ``snake_case_`` (a SyntaxError)
    while the body read ``donor_conc``/``acceptor_conc``/``intrinsic_conc``,
    and the temperature ``T`` was an unresolved module-level name.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive" )
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive" )
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive" )
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration" )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration" )
    else:
        # k*T*ln(...) is an energy in joules; dividing by the electron volt
        # (J/eV) expresses the potential in volts.
        return (
            Boltzmann
            * temperature
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowerCAmelCase_ ( main_process_only : bool = True , *args , **kwargs ):
    """Wrap ``tqdm.auto.tqdm`` so that, by default, only the main process of a
    distributed run renders a progress bar.

    Args:
        main_process_only: when True (default), the bar is disabled on every
            process whose local rank is not 0.
        *args, **kwargs: forwarded verbatim to ``tqdm``.

    Raises:
        ImportError: if tqdm is not installed.

    Fix: the parameters were all named ``snake_case_`` (a SyntaxError), the
    ``disable`` local was clobbered, and the rank test used ``== 0`` which
    would silence the *main* process instead of the workers.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
    disable = False
    if main_process_only:
        # Disable everywhere except local rank 0 (matches upstream accelerate).
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
| 1 | '''simple docstring'''
import math
def lowerCAmelCase_ ( ) -> None:
    """Interactive driver for the columnar transposition cipher.

    Prompts for a message, a key and a mode (encrypt/decrypt), then prints the
    result with a trailing ``|`` so trailing spaces are visible.

    Fix: the original body read the undefined name ``snake_case_`` for the
    message/key/mode because obfuscation clobbered every local.
    """
    message = input("Enter message: " )
    key = int(input(f"""Enter key [2-{len(message ) - 1}]: """ ) )
    mode = input("Encryption/Decryption [e/d]: " )
    # NOTE(review): `encrypt_message` / `decrypt_message` are expected to be
    # the two helpers defined below in this module -- confirm their names,
    # as the obfuscated file renames every function identically.
    if mode.lower().startswith("e" ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith("d" ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"""Output:\n{text + "|"}""" )
def lowerCAmelCase_ ( key : int , message : str ) -> str:
    """Encrypt ``message`` with a columnar transposition cipher.

    The ciphertext is read off column by column: column ``c`` collects the
    plaintext characters at indices c, c + key, c + 2*key, ...

    Fix: both parameters were named ``snake_case_`` (a SyntaxError) while the
    body read ``key`` and ``message``, and the per-column buffer was never
    bound to the ``cipher_text`` name it was indexed through.
    """
    cipher_text = [""] * key
    for col in range(key ):
        pointer = col
        # Walk this column: every key-th character starting at `col`.
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def lowerCAmelCase_ ( key : int , message : str ) -> str:
    """Decrypt a columnar-transposition ciphertext produced with the same key.

    The ciphertext is laid back onto a grid of ``ceil(len(message) / key)``
    columns and ``key`` rows; the bottom-right corner of that grid contains
    ``num_shaded_boxes`` unused cells that must be skipped while refilling.

    Fix: both parameters were named ``snake_case_`` (a SyntaxError) while the
    body read ``key``/``message``, and the grid bookkeeping locals were
    clobbered into a single obfuscated name.
    """
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        # Wrap to the next row, skipping the shaded (unused) boxes in the
        # bottom-right corner of the grid.
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 1 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
# Fast (CPU, tiny-model) tests for the diffusers ConsistencyModelPipeline:
# unconditional and class-conditional sampling, multi-step and one-step,
# each pinned against a 3x3 corner slice of the output image.
# NOTE(review): `UpperCAmelCase_ =` rebinds one obfuscated name throughout;
# the names read afterwards (`unet`, `pipe`, `image`, ...) were distinct
# locals in the original test file.
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Any = ConsistencyModelPipeline
a__ : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
a__ : str = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
a__ : List[Any] = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
# dummy_uncond_unet: tiny unconditional UNet checkpoint for fast tests.
@property
def _lowercase (self : int ):
UpperCAmelCase_ = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
# dummy_cond_unet: tiny class-conditional UNet checkpoint.
@property
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
# get_dummy_components: pick the UNet variant and pair it with the
# consistency-model multistep scheduler.
def _lowercase (self : List[str] , __a : Tuple=False ):
if class_cond:
UpperCAmelCase_ = self.dummy_cond_unet
else:
UpperCAmelCase_ = self.dummy_uncond_unet
# Default to CM multistep sampler
UpperCAmelCase_ = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
}
return components
# get_dummy_inputs: seeded generator + fixed timesteps for determinism.
def _lowercase (self : Any , __a : List[str] , __a : Tuple=0 ):
if str(__a ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(__a )
else:
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(__a )
UpperCAmelCase_ = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
# Multistep, unconditional.
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = "cpu"  # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = ConsistencyModelPipeline(**__a )
UpperCAmelCase_ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = pipe(**__a ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
# Multistep, class-conditional (class label 0).
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = "cpu"  # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components(class_cond=__a )
UpperCAmelCase_ = ConsistencyModelPipeline(**__a )
UpperCAmelCase_ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = 0
UpperCAmelCase_ = pipe(**__a ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
# One-step (num_inference_steps=1), unconditional.
def _lowercase (self : str ):
UpperCAmelCase_ = "cpu"  # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = ConsistencyModelPipeline(**__a )
UpperCAmelCase_ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = 1
UpperCAmelCase_ = None
UpperCAmelCase_ = pipe(**__a ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
# One-step, class-conditional.
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = "cpu"  # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components(class_cond=__a )
UpperCAmelCase_ = ConsistencyModelPipeline(**__a )
UpperCAmelCase_ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = 1
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = pipe(**__a ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
# Slow GPU integration tests against the full "diffusers/consistency_models"
# ImageNet-64 checkpoint, including fp16 runs under torch 2.0 flash
# attention (sdp_kernel).  Same obfuscation caveat as the fast tests above.
class __A ( unittest.TestCase ):
def _lowercase (self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
# get_inputs: fixed seed / timesteps; optionally pre-generated latents so
# fp16 runs are reproducible across devices.
def _lowercase (self : Optional[Any] , __a : str=0 , __a : str=False , __a : List[str]="cpu" , __a : str=torch.floataa , __a : int=(1, 3, 64, 64) ):
UpperCAmelCase_ = torch.manual_seed(__a )
UpperCAmelCase_ = {
"num_inference_steps": None,
"timesteps": [22, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
UpperCAmelCase_ = self.get_fixed_latents(seed=__a , device=__a , dtype=__a , shape=__a )
UpperCAmelCase_ = latents
return inputs
# get_fixed_latents: deterministic latents for a given seed/device/dtype.
def _lowercase (self : Any , __a : List[Any]=0 , __a : Tuple="cpu" , __a : Optional[int]=torch.floataa , __a : str=(1, 3, 64, 64) ):
if type(__a ) == str:
UpperCAmelCase_ = torch.device(__a )
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(__a )
UpperCAmelCase_ = randn_tensor(__a , generator=__a , device=__a , dtype=__a )
return latents
# Multistep sampling on the full checkpoint.
def _lowercase (self : str ):
UpperCAmelCase_ = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
UpperCAmelCase_ = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
UpperCAmelCase_ = ConsistencyModelPipeline(unet=__a , scheduler=__a )
pipe.to(torch_device=__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_inputs()
UpperCAmelCase_ = pipe(**__a ).images
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
# One-step sampling on the full checkpoint.
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
UpperCAmelCase_ = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
UpperCAmelCase_ = ConsistencyModelPipeline(unet=__a , scheduler=__a )
pipe.to(torch_device=__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_inputs()
UpperCAmelCase_ = 1
UpperCAmelCase_ = None
UpperCAmelCase_ = pipe(**__a ).images
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
# Multistep, fp16, forcing flash attention (torch >= 2.0 only).
@require_torch_a
def _lowercase (self : int ):
UpperCAmelCase_ = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
UpperCAmelCase_ = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
UpperCAmelCase_ = ConsistencyModelPipeline(unet=__a , scheduler=__a )
pipe.to(torch_device=__a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_inputs(get_fixed_latents=__a , device=__a )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__a , enable_math=__a , enable_mem_efficient=__a ):
UpperCAmelCase_ = pipe(**__a ).images
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
# One-step, fp16, flash attention.
@require_torch_a
def _lowercase (self : Any ):
UpperCAmelCase_ = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
UpperCAmelCase_ = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
UpperCAmelCase_ = ConsistencyModelPipeline(unet=__a , scheduler=__a )
pipe.to(torch_device=__a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_inputs(get_fixed_latents=__a , device=__a )
UpperCAmelCase_ = 1
UpperCAmelCase_ = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__a , enable_math=__a , enable_mem_efficient=__a ):
UpperCAmelCase_ = pipe(**__a ).images
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 1 | '''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_: Optional[int] =logging.getLogger()
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
# End-to-end smoke tests for RAG fine-tuning: writes a tiny seq2seq dataset,
# launches examples' finetune_rag.py as a subprocess with a full CLI, and
# asserts a minimum exact-match score from the emitted metrics.json.
# NOTE(review): `UpperCAmelCase_` rebinding hides the original locals
# (`tmp_dir`, `output_dir`, `data_dir`, `testargs`, `result`).
class __A ( UpperCamelCase__ ):
# _create_dummy_data: 12/2/2 train/val/test lines of a fixed QA pair.
def _lowercase (self : Optional[Any] , __a : str ):
os.makedirs(__a , exist_ok=__a )
UpperCAmelCase_ = {"source": "What is love ?", "target": "life"}
UpperCAmelCase_ = {"train": 12, "val": 2, "test": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCAmelCase_ = "\n".join([contents[field]] * n_lines[split] )
with open(os.path.join(__a , f"""{split}.{field}""" ) , "w" ) as f:
f.write(__a )
# _run_finetune: build the CLI, run finetune_rag.py, return parsed metrics.
def _lowercase (self : Optional[int] , __a : int , __a : str = "pytorch" ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = os.path.join(__a , "output" )
UpperCAmelCase_ = os.path.join(__a , "data" )
self._create_dummy_data(data_dir=__a )
UpperCAmelCase_ = f"""
        --data_dir {data_dir} \
        --output_dir {output_dir} \
        --model_name_or_path facebook/rag-sequence-base \
        --model_type rag_sequence \
        --do_train \
        --do_predict \
        --n_val -1 \
        --val_check_interval 1.0 \
        --train_batch_size 2 \
        --eval_batch_size 1 \
        --max_source_length 25 \
        --max_target_length 25 \
        --val_max_target_length 25 \
        --test_max_target_length 25 \
        --label_smoothing 0.1 \
        --dropout 0.1 \
        --attention_dropout 0.1 \
        --weight_decay 0.001 \
        --adam_epsilon 1e-08 \
        --max_grad_norm 0.1 \
        --lr_scheduler polynomial \
        --learning_rate 3e-04 \
        --num_train_epochs 1 \
        --warmup_steps 4 \
        --gradient_accumulation_steps 1 \
        --distributed-port 8787 \
        --use_dummy_dataset 1 \
        --distributed_retriever {distributed_retriever} \
        """.split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
UpperCAmelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(__a , env=self.get_env() )
UpperCAmelCase_ = os.path.join(__a , "metrics.json" )
with open(__a ) as f:
UpperCAmelCase_ = json.load(__a )
return result
@require_torch_gpu
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
def _lowercase (self : Dict ):
UpperCAmelCase_ = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_gpu
@require_ray
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _lowercase (self : Any ):
UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
| 1 | 1 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def lowerCAmelCase_ ( num : int ) -> int:
    """Return ``num!``, memoized across calls via ``functools.lru_cache``.

    Raises:
        ValueError: if ``num`` is negative.

    Fix: the parameter was named ``snake_case_`` while the body read ``num``,
    and the recursive call targeted an undefined ``factorial``.
    """
    if num < 0:
        raise ValueError("Number should not be negative." )
    return 1 if num in (0, 1) else num * lowerCAmelCase_(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | '''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
SCREAMING_SNAKE_CASE_: Optional[int] =Lock()
def lowerCAmelCase_ ( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
    """Worker process for odd-even transposition sort.

    Holds a single ``value`` and, on alternating phases, exchanges it with the
    left or right neighbour through the given pipes -- keeping the smaller
    value when it sits on the left side of a pair and the larger when it sits
    on the right.  The final value is sent back through ``result_pipe``.

    Fix: all seven parameters were named ``snake_case_`` (a SyntaxError) and
    every local was clobbered into one obfuscated name.
    """
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # NOTE(review): the phase count is hard-coded to 10, matching the demo
    # list length in main(); lists longer than 10 would not be fully sorted.
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            neighbor_value = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , neighbor_value )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            neighbor_value = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , neighbor_value )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
# Spawns one process per list element and wires neighbours together with
# Pipes to perform an odd-even transposition sort; returns the sorted values.
# NOTE(review): obfuscation broke this body -- every local (`result_pipe`,
# `process_array_`, `temp_rs`, `temp_rr`, `temp_ls`, `temp_lr`, `arr`) is
# assigned through the single clobbered name `UpperCAmelCase_`, and
# `target=snake_case_` should reference the worker function defined above
# (all module functions share one obfuscated name, so the correct target
# cannot be resolved from this file alone).
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Dict:
    '''simple docstring'''
    UpperCAmelCase_ = []
    UpperCAmelCase_ = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    UpperCAmelCase_ = Pipe()
    UpperCAmelCase_ = Pipe()
    process_array_.append(
        Process(
            target=snake_case_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    UpperCAmelCase_ = temp_rs
    UpperCAmelCase_ = temp_rr
    for i in range(1 , len(snake_case_ ) - 1 ):
        UpperCAmelCase_ = Pipe()
        UpperCAmelCase_ = Pipe()
        process_array_.append(
            Process(
                target=snake_case_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        UpperCAmelCase_ = temp_rs
        UpperCAmelCase_ = temp_rr
    process_array_.append(
        Process(
            target=snake_case_ , args=(
                len(snake_case_ ) - 1,
                arr[len(snake_case_ ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(snake_case_ ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(snake_case_ ) ):
        UpperCAmelCase_ = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
# Demo driver: sorts the reversed list 10..1 and prints before/after.
# NOTE(review): `odd_even_transposition` is the pre-obfuscation name of the
# sort function above (renamed by obfuscation), and the locals are clobbered
# into `UpperCAmelCase_` -- confirm against the upstream TheAlgorithms file.
def lowerCAmelCase_ ( ) -> str:
    '''simple docstring'''
    UpperCAmelCase_ = list(range(10 , 0 , -1 ) )
    print("Initial List" )
    print(*snake_case_ )
    UpperCAmelCase_ = odd_even_transposition(snake_case_ )
    print("Sorted List\n" )
    print(*snake_case_ )
if __name__ == "__main__":
main()
| 1 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( arr : list ) -> int:
    """Count inversions by brute force in O(n**2).

    An inversion is a pair of indices ``i < j`` with ``arr[i] > arr[j]``.

    Fix: the original body assigned every local to one clobbered name and
    then read unbound names (``n``, ``arr``, ``num_inversions``); the
    parameter annotation was also wrong (``str`` for a list).
    """
    num_inversions = 0
    n = len(arr )
    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def lowerCAmelCase_ ( arr : list ) -> tuple:
    """Count inversions with merge-sort style divide and conquer (O(n log n)).

    Returns:
        A ``(sorted_copy_of_arr, number_of_inversions)`` tuple.

    Fix: the original called the undefined names ``count_inversions_recursive``
    and ``_count_cross_inversions``; recursion now targets this function and
    the merge-and-count step is inlined as a private helper so the block is
    self-contained.
    """

    def _merge_count_cross (p : list , q : list ) -> tuple:
        # Merge the two sorted halves while counting pairs (x in p, y in q)
        # with x > y: whenever p[i] > q[j], every remaining element of p also
        # exceeds q[j] because p is sorted.
        merged = []
        i = j = num_inversion = 0
        while i < len(p ) and j < len(q ):
            if p[i] > q[j]:
                num_inversion += len(p ) - i
                merged.append(q[j] )
                j += 1
            else:
                merged.append(p[i] )
                i += 1
        if i < len(p ):
            merged.extend(p[i:] )
        else:
            merged.extend(q[j:] )
        return merged, num_inversion

    if len(arr ) <= 1:
        return arr, 0
    mid = len(arr ) // 2
    left, inversion_p = lowerCAmelCase_(arr[0:mid] )
    right, inversions_q = lowerCAmelCase_(arr[mid:] )
    c, cross_inversions = _merge_count_cross(left , right )
    return c, inversion_p + inversions_q + cross_inversions
def lowerCAmelCase_ ( p : list , q : list ) -> tuple:
    """Merge two sorted lists, counting the cross inversions between them.

    A cross inversion is a pair ``(x, y)`` with ``x`` taken from ``p``, ``y``
    from ``q`` and ``x > y``.

    Returns:
        A ``(merged_sorted_list, num_cross_inversions)`` tuple.

    Fix: both parameters were named ``snake_case_`` (a SyntaxError) while the
    body read ``p`` and ``q``, and the counters were clobbered.
    """
    r = []
    i = j = num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1
    # Drain whichever side still has elements; they add no new inversions.
    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )
    return r, num_inversion
# Demo driver: checks the brute-force and recursive counters agree on a
# known list (8 inversions), a sorted list (0) and an empty list (0).
# NOTE(review): `count_inversions_bf` / `count_inversions_recursive` are the
# pre-obfuscation names of the two functions above (renamed by obfuscation),
# and the locals (`arr_a`, `num_inversions_bf`, ...) are clobbered.
def lowerCAmelCase_ ( ) -> Optional[Any]:
    '''simple docstring'''
    UpperCAmelCase_ = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    UpperCAmelCase_ = count_inversions_bf(snake_case_ )
    UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(snake_case_ )
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = " , snake_case_ )
    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    UpperCAmelCase_ = count_inversions_bf(snake_case_ )
    UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(snake_case_ )
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = " , snake_case_ )
    # an empty list should also have zero inversions
    UpperCAmelCase_ = []
    UpperCAmelCase_ = count_inversions_bf(snake_case_ )
    UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(snake_case_ )
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = " , snake_case_ )
if __name__ == "__main__":
main()
| 1 | '''simple docstring'''
def lowerCAmelCase_ ( a : int , b : int ) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string.

    >>> lowerCAmelCase_(25, 32)
    '0b111001'

    Raises:
        ValueError: if either input is negative.

    Fix: both parameters were named ``snake_case_`` (a SyntaxError) and the
    binary strings / common width were never bound to the names read below.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    # Zero-pad both operands to the same width, then OR column by column.
    return "0b" + "".join(
        str(int("1" in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
SCREAMING_SNAKE_CASE_: List[str] ={
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class __A :
def __init__(self : str , __a : dict[str, list[str]] , __a : str ):
UpperCAmelCase_ = graph
# mapping node to its parent in resulting breadth first tree
UpperCAmelCase_ = {}
UpperCAmelCase_ = source_vertex
def _lowercase (self : Dict ):
UpperCAmelCase_ = {self.source_vertex}
UpperCAmelCase_ = None
UpperCAmelCase_ = [self.source_vertex] # first in first out queue
while queue:
UpperCAmelCase_ = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(__a )
UpperCAmelCase_ = vertex
queue.append(__a )
def _lowercase (self : Optional[int] , __a : str ):
if target_vertex == self.source_vertex:
return self.source_vertex
UpperCAmelCase_ = self.parent.get(__a )
if target_vertex_parent is None:
UpperCAmelCase_ = (
f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(__a )
return self.shortest_path(__a ) + f"""->{target_vertex}"""
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[Any] =Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 1 | '''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( sequence : list , start : int | None = None , end : int | None = None ) -> None:
    """Sort ``sequence[start:end + 1]`` in place with the (deliberately
    inefficient) slowsort algorithm.

    Args:
        sequence: list to sort in place.
        start: first index of the range (defaults to 0).
        end: last index of the range (defaults to ``len(sequence) - 1``).

    Fix: all three parameters were named ``snake_case_`` (a SyntaxError), the
    recursive calls targeted an undefined ``slowsort``, and the swap wrote
    into a clobbered local instead of the list itself.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    # "Sort" both halves, move the maximum of the range to `end`, then
    # recurse on everything before it -- the structure that makes slowsort
    # famously slow.
    lowerCAmelCase_(sequence , start , mid )
    lowerCAmelCase_(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid] , sequence[end]
    lowerCAmelCase_(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; the conversion code below logs through the name
# ``logger``, so the mangled binding is restored to that name.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right);
# the ``rename_keys.append(...)`` calls below populate this list, so it must be
# bound to that name (the mangled original bound it to a placeholder).
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict: dict, old: str, new: str) -> None:
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place.

    Name restored to match the call sites in the conversion loop
    (``rename_key(state_dict, src, dest)``); the mangled original dropped
    both the result binding and the re-insertion target.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict: dict) -> "OrderedDict":
    """Return a copy of ``state_dict`` with ResNet backbone keys renamed.

    Keys containing ``backbone.0.body`` (torchvision naming) are rewritten to
    the HF ``backbone.conv_encoder.model`` prefix; all other entries are kept
    unchanged, preserving insertion order.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def lowerCAmelCase_ ( snake_case_ : Optional[int] ) -> str:
    """Split each fused attention in_proj matrix of the checkpoint into
    separate query/key/value state-dict entries (encoder self-attention,
    decoder self-attention, decoder cross-attention).

    NOTE(review): this block was mangled by an automated rewrite — every
    assignment target reads ``UpperCAmelCase_`` while later lines use
    ``state_dict``/``prefix``/``in_proj_weight``/``in_proj_bias``, so the
    function cannot run as written; the real target names (including the
    per-layer q/k/v state-dict keys) must be restored from the upstream
    conversion script. The 256/512 slice bounds presumably correspond to a
    hidden size of 256 with q, k, v stacked — TODO confirm.
    """
    UpperCAmelCase_ = ""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        UpperCAmelCase_ = in_proj_weight[:2_56, :]
        UpperCAmelCase_ = in_proj_bias[:2_56]
        UpperCAmelCase_ = in_proj_weight[2_56:5_12, :]
        UpperCAmelCase_ = in_proj_bias[2_56:5_12]
        UpperCAmelCase_ = in_proj_weight[-2_56:, :]
        UpperCAmelCase_ = in_proj_bias[-2_56:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
        UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        UpperCAmelCase_ = in_proj_weight[:2_56, :]
        UpperCAmelCase_ = in_proj_bias[:2_56]
        UpperCAmelCase_ = in_proj_weight[2_56:5_12, :]
        UpperCAmelCase_ = in_proj_bias[2_56:5_12]
        UpperCAmelCase_ = in_proj_weight[-2_56:, :]
        UpperCAmelCase_ = in_proj_bias[-2_56:]
        # read in weights + bias of input projection layer of cross-attention
        UpperCAmelCase_ = state_dict.pop(
            f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
        UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        UpperCAmelCase_ = in_proj_weight_cross_attn[:2_56, :]
        UpperCAmelCase_ = in_proj_bias_cross_attn[:2_56]
        UpperCAmelCase_ = in_proj_weight_cross_attn[2_56:5_12, :]
        UpperCAmelCase_ = in_proj_bias_cross_attn[2_56:5_12]
        UpperCAmelCase_ = in_proj_weight_cross_attn[-2_56:, :]
        UpperCAmelCase_ = in_proj_bias_cross_attn[-2_56:]
def resize(image, checkpoint_url: str):
    """Resize ``image`` so its longer side becomes 800 px for detection
    checkpoints and 1000 px otherwise, preserving the aspect ratio.

    Name restored to match the caller (``normalize(resize(image, checkpoint_url))``);
    the mangled original lost every intermediate binding.
    """
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 8_00 if "detection" in checkpoint_url else 10_00
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a tensor and apply ImageNet mean/std
    normalization via torchvision functional transforms.

    Name restored to match the caller (``normalize(resize(...))``); the
    mangled original never bound the intermediate results.
    """
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any] , snake_case_ : List[Any] ) -> int:
    """Convert an original Table Transformer checkpoint to the HF format:
    download the state dict, rename keys, split q/k/v, build the model, verify
    logits/boxes on a sample image, then optionally save and push to the hub.

    NOTE(review): mangled beyond execution — all three parameters share the
    name ``snake_case_`` (a SyntaxError) and every assignment target reads
    ``UpperCAmelCase_``; the body's reads (``checkpoint_url``,
    ``pytorch_dump_folder_path``, ``push_to_hub``, ``state_dict``, ``model``,
    ``image_processor``, ``idalabel``, ``outputs``, ``expected_shape``)
    reveal the intended names. Restore them before running.
    """
    logger.info("Converting model..." )
    # load original state dict
    UpperCAmelCase_ = torch.hub.load_state_dict_from_url(snake_case_ , map_location="cpu" )
    # rename keys
    for src, dest in rename_keys:
        rename_key(snake_case_ , snake_case_ , snake_case_ )
    UpperCAmelCase_ = rename_backbone_keys(snake_case_ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(snake_case_ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    UpperCAmelCase_ = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
            UpperCAmelCase_ = state_dict.pop(snake_case_ )
            UpperCAmelCase_ = val
    # create HuggingFace model and load state dict
    UpperCAmelCase_ = TableTransformerConfig(
        backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if "detection" in checkpoint_url:
        UpperCAmelCase_ = 15
        UpperCAmelCase_ = 2
        UpperCAmelCase_ = {0: "table", 1: "table rotated"}
        UpperCAmelCase_ = idalabel
        UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
    else:
        UpperCAmelCase_ = 1_25
        UpperCAmelCase_ = 6
        UpperCAmelCase_ = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        UpperCAmelCase_ = idalabel
        UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
    UpperCAmelCase_ = DetrImageProcessor(
        format="coco_detection" , max_size=8_00 if "detection" in checkpoint_url else 10_00 )
    UpperCAmelCase_ = TableTransformerForObjectDetection(snake_case_ )
    model.load_state_dict(snake_case_ )
    model.eval()
    # verify our conversion
    UpperCAmelCase_ = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    UpperCAmelCase_ = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=snake_case_ )
    UpperCAmelCase_ = Image.open(snake_case_ ).convert("RGB" )
    UpperCAmelCase_ = normalize(resize(snake_case_ , snake_case_ ) ).unsqueeze(0 )
    UpperCAmelCase_ = model(snake_case_ )
    if "detection" in checkpoint_url:
        UpperCAmelCase_ = (1, 15, 3)
        UpperCAmelCase_ = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        UpperCAmelCase_ = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        UpperCAmelCase_ = (1, 1_25, 7)
        UpperCAmelCase_ = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        UpperCAmelCase_ = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , snake_case_ , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , snake_case_ , atol=1E-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
        Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
        model.save_pretrained(snake_case_ )
        image_processor.save_pretrained(snake_case_ )
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub..." )
        UpperCAmelCase_ = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(snake_case_ )
        image_processor.push_to_hub(snake_case_ )
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint URL / output folder / hub flag and
    # run the conversion. The mangled original bound the parser and the parsed
    # args to placeholder names while reading ``parser``/``args`` — restored.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_url',
        default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
        type=str,
        choices=[
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
        ],
        help='URL of the Table Transformer checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 1 | '''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __A ( BertTokenizationTest ):
    """Tokenization tests for DistilBERT, reusing the shared BERT suite.

    NOTE: base class and attribute names reconstructed — the mangled original
    inherited from an undefined ``UpperCamelCase__`` and named the class
    attributes ``a__``; the values (DistilBertTokenizer / Fast / True) match
    the attributes the shared suite reads (tokenizer_class,
    rust_tokenizer_class, test_rust_tokenizer) — verify against upstream.
    """

    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def _lowercase (self):
        """Check that build_inputs_with_special_tokens adds [CLS]/[SEP] correctly."""
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 1 | 1 |
'''simple docstring'''
import os
def solution() -> int:
    """Project Euler problem 18/67: maximum top-to-bottom path sum.

    Reads ``triangle.txt`` located next to this script, then folds the
    triangle row by row: each cell accumulates the larger of its (up to) two
    parents, so the final row ends up holding every best path total.

    NOTE: the mangled original passed an undefined name to
    ``os.path.realpath`` — restored to ``__file__`` per the script-relative
    lookup the join implies.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            # the right-edge cell has no parent directly above it,
            # the left-edge cell has no parent up-and-left of it
            above = a[i - 1][j] if j != len(a[i - 1]) else 0
            above_left = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(above, above_left)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
| 1 | '''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; the conversion function below logs through ``logger``,
# so the mangled binding is restored to that name.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right);
# the ``rename_keys.append(...)`` calls below populate this list, so it must be
# bound to that name (the mangled original bound it to a placeholder).
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict: dict, old: str, new: str) -> None:
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place.

    Name restored to match the rename loop in the conversion routine; the
    mangled original dropped both the result binding and the insert target.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict: dict) -> "OrderedDict":
    """Return a copy of ``state_dict`` with ResNet backbone keys renamed.

    Keys containing ``backbone.0.body`` are rewritten to the HF
    ``backbone.conv_encoder.model`` prefix; everything else is copied through
    unchanged, preserving insertion order.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Dict=False ) -> List[str]:
    """Split each encoder self-attention layer's fused in_proj matrix into
    separate query/key/value state-dict entries. Panoptic checkpoints carry a
    ``conditional_detr.`` key prefix, selected by the second (boolean) arg.

    NOTE(review): mangled — both parameters share the name ``snake_case_``
    (a SyntaxError) and assignment targets read ``UpperCAmelCase_`` while
    later lines use ``state_dict``/``is_panoptic``/``prefix``/
    ``in_proj_weight``/``in_proj_bias``; restore the real names (including the
    per-layer q/k/v destination keys) from the upstream conversion script.
    """
    UpperCAmelCase_ = ""
    if is_panoptic:
        UpperCAmelCase_ = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        UpperCAmelCase_ = in_proj_weight[:2_56, :]
        UpperCAmelCase_ = in_proj_bias[:2_56]
        UpperCAmelCase_ = in_proj_weight[2_56:5_12, :]
        UpperCAmelCase_ = in_proj_bias[2_56:5_12]
        UpperCAmelCase_ = in_proj_weight[-2_56:, :]
        UpperCAmelCase_ = in_proj_bias[-2_56:]
def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions.

    Name restored to match the caller; the mangled original passed an
    undefined name as ``stream`` — upstream uses ``stream=True``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Dict ) -> Optional[int]:
    """Convert an original Conditional DETR checkpoint (from torch hub) to the
    HF format: build the config, rename keys, split q/k/v, load the state dict
    into the HF model, verify outputs against the original model, then save
    and push to the hub.

    NOTE(review): mangled beyond execution — both parameters share the name
    ``snake_case_`` (a SyntaxError) and every assignment target reads
    ``UpperCAmelCase_``; the body's reads (``model_name``,
    ``pytorch_dump_folder_path``, ``config``, ``is_panoptic``, ``idalabel``,
    ``image_processor``, ``encoding``, ``conditional_detr``, ``state_dict``,
    ``model``, ``outputs``, ``original_outputs``) reveal the intended names.
    Restore them before running.
    """
    UpperCAmelCase_ = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        UpperCAmelCase_ = "resnet101"
    if "dc5" in model_name:
        UpperCAmelCase_ = True
    UpperCAmelCase_ = "panoptic" in model_name
    if is_panoptic:
        UpperCAmelCase_ = 2_50
    else:
        UpperCAmelCase_ = 91
        UpperCAmelCase_ = "huggingface/label-files"
        UpperCAmelCase_ = "coco-detection-id2label.json"
        UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
        UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()}
        UpperCAmelCase_ = idalabel
        UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
    # load image processor
    UpperCAmelCase_ = "coco_panoptic" if is_panoptic else "coco_detection"
    UpperCAmelCase_ = ConditionalDetrImageProcessor(format=snake_case_ )
    # prepare image
    UpperCAmelCase_ = prepare_img()
    UpperCAmelCase_ = image_processor(images=snake_case_ , return_tensors="pt" )
    UpperCAmelCase_ = encoding["pixel_values"]
    logger.info(f"""Converting model {model_name}...""" )
    # load original model from torch hub
    UpperCAmelCase_ = torch.hub.load("DeppMeng/ConditionalDETR" , snake_case_ , pretrained=snake_case_ ).eval()
    UpperCAmelCase_ = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            UpperCAmelCase_ = "conditional_detr." + src
        rename_key(snake_case_ , snake_case_ , snake_case_ )
    UpperCAmelCase_ = rename_backbone_keys(snake_case_ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(snake_case_ , is_panoptic=snake_case_ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    UpperCAmelCase_ = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
    # finally, create HuggingFace model and load state dict
    UpperCAmelCase_ = ConditionalDetrForSegmentation(snake_case_ ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case_ )
    model.load_state_dict(snake_case_ )
    model.eval()
    model.push_to_hub(repo_id=snake_case_ , organization="DepuMeng" , commit_message="Add model" )
    # verify our conversion
    UpperCAmelCase_ = conditional_detr(snake_case_ )
    UpperCAmelCase_ = model(snake_case_ )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
    # Save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
    model.save_pretrained(snake_case_ )
    image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
    # CLI entry point: parse the model name / output folder and run the
    # conversion. The mangled original bound the parser and the parsed args to
    # placeholder names while reading ``parser``/``args`` — restored.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='conditional_detr_resnet50',
        type=str,
        help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 1 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class __A ( PretrainedConfig ):
    """Configuration class for GPT-NeoX-Japanese models.

    NOTE: reconstructed — the mangled original gave every ``__init__``
    parameter the same name ``__a`` (a SyntaxError) and inherited from an
    undefined name; parameter names were recovered from the attribute
    assignments, defaults kept in their original order.
    """

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 1 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
class __A ( CLIPImageProcessor ):
    """Deprecated alias of ``CLIPImageProcessor`` kept for backward compatibility.

    NOTE: reconstructed — the mangled original inherited from an undefined
    name and passed an undefined name as the warning category; upstream uses
    ``FutureWarning``.
    """

    def __init__(self, *args, **kwargs):
        # Warn callers to migrate before the class is removed in v5.
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 1 | 1 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger, bound to the conventional name used by the sibling
# conversion scripts (the mangled original bound it to a placeholder).
logger = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name: str):
    """Build a ``MobileNetVaConfig`` for the given checkpoint name.

    Parses the depth multiplier and image size out of names like
    ``mobilenet_v1_1.0_224`` and attaches the 1001-class ImageNet label map.

    Raises:
        ValueError: for quantized checkpoints, which are not supported.

    NOTE: local/attribute names restored from the body's reads and the final
    ``return config``; label-map attribute names follow the HF config
    convention (id2label / label2id) — verify against upstream.
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 10_01
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions.

    Name restored to match the caller in the conversion routine; the mangled
    original passed an undefined name as ``stream`` — upstream uses
    ``stream=True``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Tuple , snake_case_ : str , snake_case_ : Dict=False ) -> List[Any]:
    """Convert a TensorFlow MobileNetV1 checkpoint to a HF PyTorch model,
    verify the logits on the COCO cats image, save, and optionally push.

    NOTE(review): mangled — all four parameters share the name ``snake_case_``
    (a SyntaxError) and every assignment target reads ``UpperCAmelCase_``;
    the body's reads (``model_name``, ``config``, ``model``,
    ``image_processor``, ``outputs``, ``logits``, ``expected_logits``,
    ``pytorch_dump_folder_path``, ``push_to_hub``) reveal the intended
    signature ``(model_name, checkpoint_path, pytorch_dump_folder_path,
    push_to_hub=False)`` — restore before running.
    """
    UpperCAmelCase_ = get_mobilenet_va_config(snake_case_ )
    # Load 🤗 model
    UpperCAmelCase_ = MobileNetVaForImageClassification(snake_case_ ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(snake_case_ , snake_case_ , snake_case_ )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    UpperCAmelCase_ = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , )
    UpperCAmelCase_ = image_processor(images=prepare_img() , return_tensors="pt" )
    UpperCAmelCase_ = model(**snake_case_ )
    UpperCAmelCase_ = outputs.logits
    assert logits.shape == (1, 10_01)
    if model_name == "mobilenet_v1_1.0_224":
        UpperCAmelCase_ = torch.tensor([-4.1739, -1.1233, 3.1205] )
    elif model_name == "mobilenet_v1_0.75_192":
        UpperCAmelCase_ = torch.tensor([-3.9440, -2.3141, -0.3333] )
    else:
        UpperCAmelCase_ = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , snake_case_ , atol=1E-4 )
    Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(snake_case_ )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(snake_case_ )
    if push_to_hub:
        print("Pushing to the hub..." )
        UpperCAmelCase_ = "google/" + model_name
        image_processor.push_to_hub(snake_case_ )
        model.push_to_hub(snake_case_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='mobilenet_v1_1.0_224',
        type=str,
        help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
    )
    parser.add_argument(
        '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    # Fix: the original called the undefined name `convert_movilevit_checkpoint`;
    # the converter above is bound to `lowerCAmelCase_` in this module.
    lowerCAmelCase_(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 1 | '''simple docstring'''
from __future__ import annotations
import queue
class __A :
def __init__(self : Optional[Any] , __a : str ):
UpperCAmelCase_ = data
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def lowerCAmelCase_ ( ) -> TreeNode:
    """Interactively build a binary tree breadth-first from stdin ('n' stops input)."""
    print("\n********Press N to stop entering at any point of time********\n" )
    check = input("Enter the value of the root node: " ).strip().lower()
    q = queue.Queue()
    tree_node = __A(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f"""Enter the left node of {node_found.data}: """
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = __A(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f"""Enter the right node of {node_found.data}: """
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = __A(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
    """Pre-order traversal (root -> left -> right); prints each value with a trailing comma."""
    # Fix: the original guard was `isinstance(x, x)` (TypeError) and the body
    # read the undefined name `node`.
    if not isinstance(snake_case_ , __A ) or not snake_case_:
        return
    print(snake_case_.data , end="," )
    lowerCAmelCase_(snake_case_.left )
    lowerCAmelCase_(snake_case_.right )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
    """In-order traversal (left -> root -> right)."""
    if not isinstance(snake_case_ , __A ) or not snake_case_:
        return
    lowerCAmelCase_(snake_case_.left )
    print(snake_case_.data , end="," )
    lowerCAmelCase_(snake_case_.right )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
    """Post-order traversal (left -> right -> root)."""
    if not isinstance(snake_case_ , __A ) or not snake_case_:
        return
    lowerCAmelCase_(snake_case_.left )
    lowerCAmelCase_(snake_case_.right )
    print(snake_case_.data , end="," )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
    """Breadth-first (level-order) traversal using a FIFO queue."""
    if not isinstance(snake_case_ , __A ) or not snake_case_:
        return
    q = queue.Queue()
    q.put(snake_case_ )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end="," )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
    """Level-order traversal that prints one tree level per line."""
    if not isinstance(snake_case_ , __A ) or not snake_case_:
        return
    q = queue.Queue()
    q.put(snake_case_ )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end="," )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        # Fix: the original re-enqueued the root instead of the collected children.
        for node in list_:
            q.put(node )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
    """Iterative pre-order traversal (root -> left -> right) with an explicit stack."""
    if not isinstance(snake_case_ , __A ) or not snake_case_:
        return
    stack = []
    n = snake_case_
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end="," )
            # Fix: the original pushed the root argument on every iteration.
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
    """Iterative in-order traversal (left -> root -> right) with an explicit stack."""
    if not isinstance(snake_case_ , __A ) or not snake_case_:
        return
    stack = []
    n = snake_case_
    while n or stack:
        while n:
            # Fix: the original pushed the root argument instead of the cursor.
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end="," )
        n = n.right
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
    """Iterative post-order traversal (left -> right -> root) using two stacks."""
    if not isinstance(snake_case_ , __A ) or not snake_case_:
        return
    # Fix: the original collapsed both stacks into one name, destroying the
    # reverse-order bookkeeping.
    stacka , stackb = [], []
    n = snake_case_
    stacka.append(n )
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stackb.append(n )
    while stackb:  # pop up from stack2 will be the post order
        print(stackb.pop().data , end="," )
def lowerCAmelCase_ ( s = "" , width = 50 , char = "*" ) -> str:
    """Center *s* inside a rule of *char* characters *width* wide; bare rule when *s* is empty."""
    # Fix: the original declared three parameters with the same name (SyntaxError)
    # and then read `s`, `width`, `char`, `left`, `extra`, which were never bound.
    if not s:
        return "\n" + width * char
    left , extra = divmod(width - len(s ) - 2 , 2 )
    return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
    # NOTE(review): every helper above is defined under the same mangled name
    # `lowerCAmelCase_`, and no module-level `prompt`, `build_tree`, `pre_order`,
    # ... or `node` bindings exist, so this demo raises NameError as written --
    # confirm the intended (pre-mangling) function names before running.
    import doctest
    doctest.testmod()
    print(prompt('Binary Tree Traversals'))
    # Build the tree once from stdin, then show every traversal on it.
    SCREAMING_SNAKE_CASE_: TreeNode =build_tree()
    print(prompt('Pre Order Traversal'))
    pre_order(node)
    print(prompt() + '\n')
    print(prompt('In Order Traversal'))
    in_order(node)
    print(prompt() + '\n')
    print(prompt('Post Order Traversal'))
    post_order(node)
    print(prompt() + '\n')
    print(prompt('Level Order Traversal'))
    level_order(node)
    print(prompt() + '\n')
    print(prompt('Actual Level Order Traversal'))
    level_order_actual(node)
    print('*' * 50 + '\n')
    print(prompt('Pre Order Traversal - Iteration Version'))
    pre_order_iter(node)
    print(prompt() + '\n')
    print(prompt('In Order Traversal - Iteration Version'))
    in_order_iter(node)
    print(prompt() + '\n')
    print(prompt('Post Order Traversal - Iteration Version'))
    post_order_iter(node)
    print(prompt())
| 1 | 1 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
SCREAMING_SNAKE_CASE_: Optional[Any] =[
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def lowerCAmelCase_ ( ):
    """Close or warn on stale huggingface/transformers issues.

    Issues older than 30 days are closed after 7 idle days when the last
    comment was the bot's, or receive a stale warning after 23 idle days;
    issues carrying an exempt label are skipped.
    """
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/transformers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        # Fix: the sort key lambda read the undefined name `i`, and
        # `reverse=`/the locals were replaced by unbound identifiers.
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed" )
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored." )
if __name__ == "__main__":
    # Fix: the original called the undefined name `main`; the entry point above
    # is bound to `lowerCAmelCase_` in this module.
    lowerCAmelCase_()
| 1 | '''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(
    UpperCamelCase__ , r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
""" , )
class __A ( UpperCamelCase__ ):
    """Masked-language-modeling pipeline: fills the tokenizer's mask token in input text.

    NOTE(review): every method here was defined under the single mangled name
    `_lowercase`, so later definitions shadowed earlier ones while the call sites
    in this class used the real names; methods are restored to the names the
    visible call sites (and the base ``Pipeline`` hooks) require -- confirm
    against the base class.
    """

    def get_masked_index(self , input_ids: GenericTensor ):
        # Framework-specific lookup of every mask-token position in ``input_ids``.
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
        else:
            raise ValueError("Unsupported framework" )
        return masked_index

    def _ensure_exactly_one_mask_token(self , input_ids: GenericTensor ):
        # Raise a pipeline error when the input contains no mask token at all.
        masked_index = self.get_masked_index(input_ids )
        numel = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )

    def ensure_exactly_one_mask_token(self , model_inputs: GenericTensor ):
        if isinstance(model_inputs , list ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids )

    def preprocess(self , inputs , return_tensors=None , **preprocess_parameters ):
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs

    def _forward(self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        # Keep the input ids around so postprocess can rebuild the sequences.
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self , model_outputs , top_k=5 , target_ids=None ):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1 )
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0 ) , target_ids.reshape(-1 , 1 ) )
                probs = tf.expand_dims(probs , 0 )
            topk = tf.math.top_k(probs , k=top_k )
            values , predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1 )
            if target_ids is not None:
                probs = probs[..., target_ids]
            values , predictions = probs.topk(top_k )
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            row = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=single_mask )
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
                row.append(proposition )
            result.append(row )
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self , targets , top_k=None ):
        # Map user-supplied target words to vocabulary ids, tokenizing fallbacks.
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )["input_ids"]
                if len(input_ids ) == 0:
                    logger.warning(
                        f"""The specified target token `{target}` does not exist in the model vocabulary. """
                        "We cannot replace it with anything meaningful, ignoring it" )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"""The specified target token `{target}` does not exist in the model vocabulary. """
                    f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError("At least one target must be provided when passed." )
        target_ids = np.array(target_ids )
        return target_ids

    def _sanitize_parameters(self , top_k=None , targets=None ):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
        return {}, {}, postprocess_params

    def __call__(self , inputs , *args , **kwargs ):
        outputs = super().__call__(inputs , **kwargs )
        # Unwrap the batch dimension when a single string was passed in.
        if isinstance(inputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
| 1 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    """Fast tests for the IF image-to-image super-resolution pipeline.

    NOTE(review): the class attributes and methods were all bound to single
    mangled names (shadowing each other); restored to the names the pipeline
    tester mixins read -- confirm against the mixin definitions.
    """
    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}

    def get_dummy_components(self ):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self , device , seed=0 ):
        # Fix: the original declared both parameters as the same name (SyntaxError),
        # conflating the target device with the RNG seed.
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass(self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def test_save_load_optional_components(self ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def test_save_load_floataa(self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def test_attention_slicing_forward_pass(self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def test_save_load_local(self ):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 1 | '''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__)
@dataclass(frozen=UpperCamelCase__ )
class __A :
a__ : str
a__ : str
a__ : Optional[str] = None
a__ : Optional[str] = None
a__ : Optional[str] = None
@dataclass(frozen=UpperCamelCase__ )
class __A :
a__ : List[int]
a__ : Optional[List[int]] = None
a__ : Optional[List[int]] = None
a__ : Optional[Union[int, float]] = None
a__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __A ( UpperCamelCase__ ):
a__ : List[InputFeatures]
def __init__(self : Any , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : bool = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = os.path.join(
__a , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , )
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase_ = cached_features_file + ".lock"
with FileLock(__a ):
if os.path.exists(__a ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase_ = torch.load(__a )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase_ = (
processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
)
logger.info("Training examples: %s" , len(__a ) )
UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
logger.info("Saving features into cached file %s" , __a )
torch.save(self.features , __a )
def __len__(self : List[Any] ):
return len(self.features )
def __getitem__(self : Any , __a : Optional[Any] ):
return self.features[i]
def _lowercase (self : Union[str, Any] ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class __A :
a__ : List[InputFeatures]
def __init__(self : Union[str, Any] , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = 128 , __a : Any=False , __a : bool = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
UpperCAmelCase_ = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(__a )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase_ = tf.data.Dataset.from_generator(
__a , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowercase (self : int ):
return self.dataset
def __len__(self : Any ):
return len(self.features )
def __getitem__(self : int , __a : Union[str, Any] ):
return self.features[i]
def _lowercase (self : int ):
return self.label_list
class __A ( UpperCamelCase__ ):
    """Processor for the HANS data set (tab-separated heuristics files).

    Fix: all four methods were defined under the one name `_lowercase` (mutual
    shadowing) while the call sites here and in the dataset classes use
    `_create_examples` / `get_train_examples` / `get_dev_examples` / `get_labels`.
    """

    def get_train_examples(self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_train_set.txt" ) ) , "train" )

    def get_dev_examples(self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_evaluation_set.txt" ) ) , "dev" )

    def get_labels(self ):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self , lines , set_type ):
        # Skip the header row; columns 5/6 hold the sentence pair, 7 the label.
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex" ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def lowerCAmelCase_ ( examples , label_list , max_length , tokenizer , ):
    """Tokenize HANS examples into ``InputFeatures``.

    Fix: all four parameters were declared under one name (SyntaxError), and the
    boolean tokenizer kwargs had been replaced by that name.
    """
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc="convert examples to features" ):
        if ex_index % 1_00_00 == 0:
            logger.info("Writing example %d" % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding="max_length" , truncation=True , return_overflowing_tokens=True , )
        # Unknown labels fall back to class 0.
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info("*** Example ***" )
        logger.info(f"""guid: {example}""" )
        logger.info(f"""features: {features[i]}""" )
    return features
# Number of labels per HANS task.
SCREAMING_SNAKE_CASE_: int ={
    'hans': 3,
}
# NOTE(review): `HansProcessor` is not defined in this module under the current
# (mangled) class names -- the processor class above is `__A`; confirm the mapping.
SCREAMING_SNAKE_CASE_: Any ={
    'hans': HansProcessor,
}
| 1 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A ( UpperCamelCase__ ):
    """Combines a ViLT image processor and a BERT tokenizer into one processor.

    NOTE(review): class attributes and methods were all bound to single mangled
    names (shadowing each other); restored to the names ``ProcessorMixin``
    reads -- confirm against the mixin definition.
    """
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """ViltImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__(self , image_processor=None , tokenizer=None , **kwargs ):
        # Fix: both parameters and **kwargs shared one name (SyntaxError); the
        # warning category had been replaced by an unbound identifier.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__(self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        # Tokenize the text, then attach the image features to the same encoding.
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding

    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names(self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # dict.fromkeys keeps order while removing duplicates.
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class(self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor(self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 1 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ={}
class __A ( UpperCamelCase__ ):
    """LLaMA model configuration.

    NOTE(review): the two class attributes were both bound to `a__`; restored to
    the names ``PretrainedConfig`` reads -- confirm against the base class.
    """
    model_type = """llama"""
    keys_to_ignore_at_inference = ["""past_key_values"""]

    def __init__(self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        # Fix: every parameter was declared under the same name (SyntaxError);
        # names restored from the attribute assignments below.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation(self ):
        """Validate the ``rope_scaling`` dict: requires a known `type` and a float `factor` > 1."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 1 | 1 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowerCAmelCase_ ( monkeypatch ):
    """Reset the record of already-emitted deprecation warnings before each test."""
    # Fix: pytest injects the monkeypatch fixture by parameter name, so the
    # mangled parameter name broke fixture resolution.
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def lowerCAmelCase_ ( monkeypatch ):
    """Patch `datasets.inspect.huggingface_hub` with a stub exposing `list_metrics`."""
    class __A :
        # Minimal stand-in for one hub metric entry.
        def __init__(self , metric_id ):
            # Fix: the original assigned a throwaway local instead of `self.id`.
            self.id = metric_id

    class __B :
        _metrics = [__A(metric_id ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self ):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub" , __B() )
@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def lowerCAmelCase_ ( func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ):
    """Each deprecated metric entry point should emit a FutureWarning pointing to evaluate."""
    # Fix: all five parameters shared one name (SyntaxError), and the warning
    # category had been replaced by an unbound identifier.
    # NOTE(review): the two fixtures above carry mangled function names -- the
    # `mock_*` parameter names here must match the fixture names; confirm.
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match="https://huggingface.co/docs/evaluate" ):
        func(*args )
| 1 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __A ( unittest.TestCase ):
    def _lowercase (self : Tuple ):
        # clean up the VRAM after each test
        # NOTE(review): every method in this class is named `_lowercase`, so later
        # definitions shadow earlier ones and unittest never invokes this as
        # tearDown; the names look mangled -- confirm the intended method names.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
def _lowercase (self : str ):
UpperCAmelCase_ = 1
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def _lowercase (self : int ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def _lowercase (self : Any ):
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowercase (self : Optional[Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
return CLIPTextModel(__a )
def _lowercase (self : Any ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase_ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape[0] == 2
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowercase (self : str ):
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase_ = unet.half()
UpperCAmelCase_ = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images
UpperCAmelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
    """Slow GPU integration tests against the real stabilityai x4-upscaler checkpoint.

    NOTE(review): as in the fast-test class above, every method is named
    ``_lowercase`` and the placeholder ``UpperCAmelCase_`` assignments never
    bind the names the bodies read (``pipe``, ``prompt``, ``output``,
    ``mem_bytes`` ...); confirm against the upstream diffusers tests.
    """
    def _lowercase (self : List[str] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _lowercase (self : List[Any] ):
        # Full-precision run compared against a stored reference numpy image.
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy" )
        UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()
        UpperCAmelCase_ = "a cat sitting on a park bench"
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(
            prompt=__a , image=__a , generator=__a , output_type="np" , )
        UpperCAmelCase_ = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-3
    def _lowercase (self : Tuple ):
        # Half-precision run with a looser tolerance against the fp16 reference.
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy" )
        UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
            __a , torch_dtype=torch.floataa , )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()
        UpperCAmelCase_ = "a cat sitting on a park bench"
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(
            prompt=__a , image=__a , generator=__a , output_type="np" , )
        UpperCAmelCase_ = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1
    def _lowercase (self : List[Any] ):
        # Memory-budget test: with attention slicing + sequential CPU offload the
        # fp16 pipeline must peak below ~2.9 GB of allocated VRAM.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
            __a , torch_dtype=torch.floataa , )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCAmelCase_ = "a cat sitting on a park bench"
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(
            prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , )
        UpperCAmelCase_ = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 1 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# NOTE(review): the three module constants below were all collapsed to the same
# placeholder name ``SCREAMING_SNAKE_CASE_`` (presumably ``logger``,
# ``MODEL_CONFIG_CLASSES`` and ``MODEL_TYPES`` originally), so each assignment
# overwrites the previous one and the ``MODEL_CONFIG_CLASSES`` read on the last
# line is unresolved -- restore the distinct names before running this script.
SCREAMING_SNAKE_CASE_: Dict =logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
SCREAMING_SNAKE_CASE_: Optional[Any] =list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
SCREAMING_SNAKE_CASE_: Any =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __A :
    """Arguments controlling the dataset and masking setup for masked image modeling.

    NOTE(review): every field was renamed to ``a__`` by the mechanical rewrite,
    so within the dataclass each declaration overrides the previous one and the
    original field names (``dataset_name``, ``train_dir``, ``mask_ratio`` ...)
    -- which the method below still reads via ``self.train_dir`` /
    ``self.validation_dir`` -- are lost.  Restore the real names before use.
    """
    a__ : Optional[str] = field(
        default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
    a__ : Optional[str] = field(
        default=UpperCamelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    a__ : Optional[str] = field(
        default=UpperCamelCase__ , metadata={"""help""": """The column name of the images in the files. If not set, will try to use 'image' or 'img'."""} , )
    a__ : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """A folder containing the training data."""} )
    a__ : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """A folder containing the validation data."""} )
    a__ : Optional[float] = field(
        default=0.1_5 , metadata={"""help""": """Percent to split off of train for validation."""} )
    a__ : int = field(default=32 , metadata={"""help""": """The size of the square patches to use for masking."""} )
    a__ : float = field(
        default=0.6 , metadata={"""help""": """Percentage of patches to mask."""} , )
    a__ : Optional[int] = field(
        default=UpperCamelCase__ , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    a__ : Optional[int] = field(
        default=UpperCamelCase__ , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    def _lowercase (self : Any ):
        # Presumably ``__post_init__`` originally: build the ``data_files``
        # mapping from the train/validation directories when they are given.
        UpperCAmelCase_ = {}
        if self.train_dir is not None:
            UpperCAmelCase_ = self.train_dir
        if self.validation_dir is not None:
            UpperCAmelCase_ = self.validation_dir
        UpperCAmelCase_ = data_files if data_files else None
@dataclass
class __A :
    """Arguments for selecting/configuring the model, config and image processor.

    NOTE(review): as with the data arguments above, all fields were collapsed
    to ``a__`` (each declaration overrides the previous) and defaults to the
    placeholder ``UpperCamelCase__``; the training code below reads the
    original names (``model_name_or_path``, ``config_name_or_path``,
    ``image_size``, ``patch_size``, ``encoder_stride`` ...), which this class
    no longer defines.  Restore the real field names before use.
    """
    a__ : str = field(
        default=UpperCamelCase__ , metadata={
            """help""": (
                """The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a """
                """checkpoint identifier on the hub. """
                """Don't set if you want to train a model from scratch."""
            )
        } , )
    a__ : Optional[str] = field(
        default=UpperCamelCase__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCamelCase__ )} , )
    a__ : Optional[str] = field(
        default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    a__ : Optional[str] = field(
        default=UpperCamelCase__ , metadata={
            """help""": (
                """Override some existing default config settings when a model is trained from scratch. Example: """
                """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
            )
        } , )
    a__ : Optional[str] = field(
        default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"""} , )
    a__ : str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    a__ : str = field(default=UpperCamelCase__ , metadata={"""help""": """Name or path of preprocessor config."""} )
    a__ : bool = field(
        default=UpperCamelCase__ , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
    a__ : Optional[int] = field(
        default=UpperCamelCase__ , metadata={
            """help""": (
                """The size (resolution) of each image. If not specified, will use `image_size` of the configuration."""
            )
        } , )
    a__ : Optional[int] = field(
        default=UpperCamelCase__ , metadata={
            """help""": (
                """The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."""
            )
        } , )
    a__ : Optional[int] = field(
        default=UpperCamelCase__ , metadata={"""help""": """Stride to use for the encoder."""} , )
class __A :
    """Random block-wise mask generator for SimMIM-style masked image modeling.

    Masks square blocks of ``mask_patch_size`` pixels until roughly
    ``mask_ratio`` of the tokens are covered, then upsamples the mask to the
    model's ``model_patch_size`` grid and returns it flattened.

    The obfuscated original declared all four ``__init__`` parameters as
    ``__a`` (a SyntaxError), assigned to a throwaway local instead of ``self``,
    and used the undefined name ``__a`` as the numpy dtype; all three defects
    are fixed here while keeping the call-site behavior.
    """

    def __init__(self , input_size=192 , mask_patch_size=32 , model_patch_size=4 , mask_ratio=0.6 ):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size" )
        # Number of mask patches per side, and how many model patches each covers.
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )

    def __call__(self ):
        """Return a flat 0/1 ``torch.Tensor`` mask over the model-patch grid."""
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count , dtype=int )
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size) )
        # Upsample: each mask patch covers scale x scale model patches.
        mask = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
        return torch.tensor(mask.flatten() )
def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = torch.stack([example["pixel_values"] for example in examples] )
UpperCAmelCase_ = torch.stack([example["mask"] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def lowerCAmelCase_ ( ) -> List[Any]:
    '''Train/evaluate a masked-image-modeling (SimMIM-style) model end to end.

    Parses model/data/training arguments, loads the dataset, builds the config,
    image processor and model, wires up the masking transforms and a
    ``Trainer``, then optionally trains, evaluates, and pushes to the hub or
    writes a model card.

    NOTE(review): throughout this body the mechanical rewrite replaced the real
    assignment targets with the placeholder ``UpperCAmelCase_``, while later
    statements still read the original names (``parser``, ``training_args``,
    ``config``, ``ds``, ``image_processor``, ``model``, ``trainer`` ...), and
    several call arguments were replaced by ``snake_case_``.  As written the
    function cannot run; restore the original targets before use.
    '''
    UpperCAmelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim" , snake_case_ , snake_case_ )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    UpperCAmelCase_ = training_args.get_process_log_level()
    logger.setLevel(snake_case_ )
    transformers.utils.logging.set_verbosity(snake_case_ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(f"""Training/evaluation parameters {training_args}""" )
    # Detecting last checkpoint.
    UpperCAmelCase_ = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        UpperCAmelCase_ = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
    # Initialize our dataset.
    UpperCAmelCase_ = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    UpperCAmelCase_ = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , snake_case_ ) and data_args.train_val_split > 0.0:
        UpperCAmelCase_ = ds["train"].train_test_split(data_args.train_val_split )
        UpperCAmelCase_ = split["train"]
        UpperCAmelCase_ = split["test"]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase_ = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        UpperCAmelCase_ = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case_ )
    elif model_args.model_name_or_path:
        UpperCAmelCase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case_ )
    else:
        UpperCAmelCase_ = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch." )
        if model_args.config_overrides is not None:
            logger.info(f"""Overriding config: {model_args.config_overrides}""" )
            config.update_from_string(model_args.config_overrides )
            logger.info(f"""New config: {config}""" )
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(snake_case_ , "decoder_type" ):
        UpperCAmelCase_ = "simmim"
    # adapt config
    UpperCAmelCase_ = model_args.image_size if model_args.image_size is not None else config.image_size
    UpperCAmelCase_ = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    UpperCAmelCase_ = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        } )
    # create image processor
    if model_args.image_processor_name:
        UpperCAmelCase_ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case_ )
    elif model_args.model_name_or_path:
        UpperCAmelCase_ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case_ )
    else:
        UpperCAmelCase_ = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        UpperCAmelCase_ = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        UpperCAmelCase_ = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("Training new model from scratch" )
        UpperCAmelCase_ = AutoModelForMaskedImageModeling.from_config(snake_case_ )
    if training_args.do_train:
        UpperCAmelCase_ = ds["train"].column_names
    else:
        UpperCAmelCase_ = ds["validation"].column_names
    if data_args.image_column_name is not None:
        UpperCAmelCase_ = data_args.image_column_name
    elif "image" in column_names:
        UpperCAmelCase_ = "image"
    elif "img" in column_names:
        UpperCAmelCase_ = "img"
    else:
        UpperCAmelCase_ = column_names[0]
    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    UpperCAmelCase_ = Compose(
        [
            Lambda(lambda snake_case_ : img.convert("RGB" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
    # create mask generator
    UpperCAmelCase_ = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
    def preprocess_images(snake_case_ : List[str] ):
        # Apply the SimMIM transforms and attach one random mask per example.
        UpperCAmelCase_ = [transforms(snake_case_ ) for image in examples[image_column_name]]
        UpperCAmelCase_ = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            UpperCAmelCase_ = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(snake_case_ )
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            UpperCAmelCase_ = (
                ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(snake_case_ )
    # Initialize our trainer
    UpperCAmelCase_ = Trainer(
        model=snake_case_ , args=snake_case_ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=snake_case_ , data_collator=snake_case_ , )
    # Training
    if training_args.do_train:
        UpperCAmelCase_ = None
        if training_args.resume_from_checkpoint is not None:
            UpperCAmelCase_ = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            UpperCAmelCase_ = last_checkpoint
        UpperCAmelCase_ = trainer.train(resume_from_checkpoint=snake_case_ )
        trainer.save_model()
        trainer.log_metrics("train" , train_result.metrics )
        trainer.save_metrics("train" , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        UpperCAmelCase_ = trainer.evaluate()
        trainer.log_metrics("eval" , snake_case_ )
        trainer.save_metrics("eval" , snake_case_ )
    # Write model card and (optionally) push to hub
    UpperCAmelCase_ = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**snake_case_ )
    else:
        trainer.create_model_card(**snake_case_ )
if __name__ == "__main__":
    # The script entry point above is named ``lowerCAmelCase_``; the original
    # ``main()`` call raised NameError because no ``main`` is defined.
    lowerCAmelCase_()
| 1 | '''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __A ( UpperCamelCase__ ):
    """A distribution transformed by the affine map ``x -> loc + scale * x``.

    The obfuscated original declared all four ``__init__`` parameters as
    ``__a`` (a SyntaxError) and assigned ``loc``/``scale`` to a throwaway
    local instead of ``self``; both are fixed here.  The three properties
    below (mean/variance/stddev in the upstream source) still share the name
    ``_lowercase`` and so shadow each other -- renaming them would change the
    class interface, so only their bodies are made self-contained.
    """

    def __init__(self : int , __a : Distribution , loc : Dict=None , scale : int=None , event_dim : Any=0 ):
        # Default to the identity transform when loc/scale are not provided.
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(__a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )

    @property
    def _lowercase (self : Union[str, Any] ):
        # Mean of the transformed distribution.
        return self.base_dist.mean * self.scale + self.loc

    @property
    def _lowercase (self : List[Any] ):
        # Variance of the transformed distribution.
        return self.base_dist.variance * self.scale**2

    @property
    def _lowercase (self : List[Any] ):
        # Standard deviation; computed directly because the variance property
        # above is not reachable as ``self.variance`` under its current name.
        return (self.base_dist.variance * self.scale**2).sqrt()
class __A ( nn.Module ):
    """Projects hidden states onto the (flattened) parameters of a distribution.

    NOTE(review): presumably ``ParameterProjection`` upstream.  The constructor
    collapsed its real targets into placeholder ``UpperCAmelCase_`` assignments,
    so ``self.proj`` / ``self.domain_map`` read by the second method (the
    forward pass) are never bound, and the body references the unbound names
    ``args_dim`` / ``domain_map``; restore before use.
    """
    def __init__(self : Optional[int] , __a : int , __a : Dict[str, int] , __a : Callable[..., Tuple[torch.Tensor]] , **__a : List[str] ):
        super().__init__(**__a )
        UpperCAmelCase_ = args_dim
        UpperCAmelCase_ = nn.ModuleList([nn.Linear(__a , __a ) for dim in args_dim.values()] )
        UpperCAmelCase_ = domain_map
    def _lowercase (self : List[str] , __a : torch.Tensor ):
        # Forward: one linear projection per distribution argument, then map
        # each raw output into its valid domain via ``domain_map``.
        UpperCAmelCase_ = [proj(__a ) for proj in self.proj]
        return self.domain_map(*__a )
class __A ( nn.Module ):
    """Wrap a plain callable so it can be used inside an ``nn.Module`` tree.

    The obfuscated original assigned the undefined name ``function`` to a
    throwaway local (so ``self.function`` was never bound) and declared the
    forward method with two parameters both named ``__a`` (a SyntaxError);
    both defects are fixed here without changing the public interface.
    """

    def __init__(self : Union[str, Any] , __a : List[str] ):
        super().__init__()
        # Store the wrapped callable read by the forward method below.
        self.function = __a

    def _lowercase (self : Optional[int] , __a : List[str] , *args : Optional[int] ):
        # Forward: apply the wrapped callable to the input(s).
        return self.function(__a , *args )
class __A :
    """Base class describing how network outputs parameterize a distribution.

    NOTE(review): presumably ``DistributionOutput`` upstream.  Several
    assignments were collapsed to the placeholder ``UpperCAmelCase_`` (e.g.
    ``self.dim`` / ``self.args_dim`` are never bound in ``__init__``), all
    methods share the name ``_lowercase`` (each shadows the previous), and the
    bodies call the names ``AffineTransformed`` / ``ParameterProjection`` /
    ``LambdaLayer`` although the corresponding classes in this file are all
    named ``__A``.  Restore the original identifiers before use.
    """
    a__ : type
    a__ : int
    a__ : Dict[str, int]
    def __init__(self : List[Any] , __a : int = 1 ):
        UpperCAmelCase_ = dim
        UpperCAmelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _lowercase (self : Any , __a : Any ):
        # Build the base distribution; wrap in Independent for vector outputs.
        if self.dim == 1:
            return self.distribution_class(*__a )
        else:
            return Independent(self.distribution_class(*__a ) , 1 )
    def _lowercase (self : List[str] , __a : Union[str, Any] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , ):
        # Build the output distribution, optionally affine-rescaled by loc/scale.
        UpperCAmelCase_ = self._base_distribution(__a )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(__a , loc=__a , scale=__a , event_dim=self.event_dim )
    @property
    def _lowercase (self : Any ):
        # Event shape: scalar () when dim == 1, else (dim,).
        return () if self.dim == 1 else (self.dim,)
    @property
    def _lowercase (self : Dict ):
        # Number of event dimensions.
        return len(self.event_shape )
    @property
    def _lowercase (self : Tuple ):
        # Returns 0.0 (presumably the ``value_in_support`` default upstream).
        return 0.0
    def _lowercase (self : List[str] , __a : int ):
        # Build the parameter-projection layer for a given input feature size.
        return ParameterProjection(
            in_features=__a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def _lowercase (self : Optional[int] , *__a : torch.Tensor ):
        # Map raw network outputs into each parameter's valid domain (abstract).
        raise NotImplementedError()
    @staticmethod
    def _lowercase (__a : torch.Tensor ):
        # Square-plus: smooth, strictly positive mapping (x + sqrt(x^2 + 4)) / 2.
        # NOTE(review): reads the undefined name ``x``; presumably should be ``__a``.
        return (x + torch.sqrt(torch.square(__a ) + 4.0 )) / 2.0
class __A ( UpperCamelCase__ ):
    """Distribution output for a Student's t-distribution (df, loc, scale).

    NOTE(review): the classmethod (presumably ``domain_map``) declares three
    parameters all named ``__a`` (a SyntaxError) and reads ``scale``/``df``/
    ``loc`` that the placeholder assignments never bind; restore before use.
    """
    a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    a__ : type = StudentT
    @classmethod
    def _lowercase (cls : Union[str, Any] , __a : torch.Tensor , __a : torch.Tensor , __a : torch.Tensor ):
        # Map raw outputs into valid ranges: scale -> positive via squareplus
        # (clamped away from zero), df -> > 2; squeeze trailing singleton dims.
        UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
        UpperCAmelCase_ = 2.0 + cls.squareplus(__a )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
    """Distribution output for a Normal distribution (loc, scale).

    NOTE(review): the classmethod declares two parameters both named ``__a``
    (a SyntaxError) and reads ``scale``/``loc`` that are never bound, as
    elsewhere in this file; restore the original names before use.
    """
    a__ : Dict[str, int] = {"loc": 1, "scale": 1}
    a__ : type = Normal
    @classmethod
    def _lowercase (cls : Tuple , __a : torch.Tensor , __a : torch.Tensor ):
        # scale -> positive via squareplus, clamped away from zero.
        UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
    """Distribution output for a NegativeBinomial distribution (total_count, logits).

    NOTE(review): the methods share the name ``_lowercase`` (shadowing each
    other), the classmethod declares duplicate ``__a`` parameters (a
    SyntaxError), and the placeholder ``UpperCAmelCase_`` assignments lose the
    real targets (``total_count``, ``logits``); restore before use.
    """
    a__ : Dict[str, int] = {"total_count": 1, "logits": 1}
    a__ : type = NegativeBinomial
    @classmethod
    def _lowercase (cls : Optional[Any] , __a : torch.Tensor , __a : torch.Tensor ):
        # total_count -> positive via squareplus; squeeze trailing singleton dims.
        UpperCAmelCase_ = cls.squareplus(__a )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _lowercase (self : List[str] , __a : str ):
        # Build the base NegativeBinomial (wrapped in Independent when dim > 1).
        UpperCAmelCase_ , UpperCAmelCase_ = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=__a , logits=__a )
        else:
            return Independent(self.distribution_class(total_count=__a , logits=__a ) , 1 )
    def _lowercase (self : Optional[Any] , __a : int , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None ):
        # Scaling is folded into the logits (shifted by log(scale)) rather than
        # applied as an affine transform, since the support is the integers.
        UpperCAmelCase_ , UpperCAmelCase_ = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 1 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
    def __init__(self : List[Any] , __a : List[str] , __a : Union[str, Any]=13 , __a : Any=7 , __a : Optional[Any]=True , __a : str=True , __a : int=True , __a : List[Any]=True , __a : List[str]=99 , __a : str=32 , __a : int=5 , __a : List[Any]=4 , __a : Any=37 , __a : List[Any]="gelu" , __a : List[str]=0.1 , __a : Optional[Any]=0.1 , __a : Any=128 , __a : Union[str, Any]=32 , __a : int=16 , __a : Tuple=2 , __a : Union[str, Any]=0.02 , __a : Optional[int]=3 , __a : str=4 , __a : List[str]=None , ):
        # Store the tester's hyperparameters (batch size, model dims, flags ...).
        # NOTE(review): every parameter was renamed to ``__a`` (duplicate
        # parameter names are a SyntaxError) and the assignments below read the
        # original names (``parent``, ``batch_size`` ...) that no longer exist,
        # while the targets were collapsed into ``UpperCAmelCase_``.  Restore
        # the original signature and ``self.<name> = <name>`` form before use.
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = seq_length
        UpperCAmelCase_ = is_training
        UpperCAmelCase_ = use_input_mask
        UpperCAmelCase_ = use_token_type_ids
        UpperCAmelCase_ = use_labels
        UpperCAmelCase_ = vocab_size
        UpperCAmelCase_ = hidden_size
        UpperCAmelCase_ = num_hidden_layers
        UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = intermediate_size
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = hidden_dropout_prob
        UpperCAmelCase_ = attention_probs_dropout_prob
        UpperCAmelCase_ = max_position_embeddings
        UpperCAmelCase_ = type_vocab_size
        UpperCAmelCase_ = type_sequence_label_size
        UpperCAmelCase_ = initializer_range
        UpperCAmelCase_ = num_labels
        UpperCAmelCase_ = num_choices
        UpperCAmelCase_ = scope
    def _lowercase (self : Dict ):
        # Build random input ids / masks / labels plus a config for one forward pass.
        UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase_ = None
        if self.use_input_mask:
            UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase_ = None
        if self.use_token_type_ids:
            UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCAmelCase_ = None
        UpperCAmelCase_ = None
        UpperCAmelCase_ = None
        if self.use_labels:
            UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
        UpperCAmelCase_ = self.get_config()
        # NOTE(review): the names returned below are never bound -- the
        # placeholder assignments above lost the real targets.
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def _lowercase (self : Any ):
        # Construct a NezhaConfig mirroring the tester's hyperparameters.
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
    def _lowercase (self : Optional[Any] ):
        # Like prepare_config_and_inputs, plus encoder hidden states and an
        # encoder attention mask for testing the model in decoder mode.
        (
            (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) ,
        ) = self.prepare_config_and_inputs()
        UpperCAmelCase_ = True
        UpperCAmelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        # NOTE(review): the tuple-unpack above binds the same placeholder name
        # seven times, so the names returned below are unresolved.
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def _lowercase (self : str , __a : Optional[Any] , __a : Tuple , __a : Tuple , __a : Union[str, Any] , __a : int , __a : Tuple , __a : str ):
        """(create_and_check_model) Instantiate ``NezhaModel`` and assert output shapes.

        NOTE(review): the repeated parameter name ``__a`` is a SyntaxError in
        Python — the original surely used distinct names (config, input_ids,
        token_type_ids, input_mask, ...); confirm upstream.
        """
        UpperCAmelCase_ = NezhaModel(config=__a )
        model.to(__a )
        model.eval()
        UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a )
        UpperCAmelCase_ = model(__a , token_type_ids=__a )
        UpperCAmelCase_ = model(__a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def _lowercase (self : Dict , __a : List[Any] , __a : Optional[int] , __a : List[str] , __a : Any , __a : Tuple , __a : Dict , __a : List[str] , __a : Dict , __a : Union[str, Any] , ):
        """(create_and_check_model_as_decoder) Run the model with cross-attention
        inputs and assert output shapes. NOTE(review): mangled duplicate ``__a``
        parameters (SyntaxError) — confirm names upstream."""
        UpperCAmelCase_ = True
        UpperCAmelCase_ = NezhaModel(__a )
        model.to(__a )
        model.eval()
        UpperCAmelCase_ = model(
            __a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
        UpperCAmelCase_ = model(
            __a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , )
        UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def _lowercase (self : Optional[Any] , __a : List[str] , __a : List[Any] , __a : Union[str, Any] , __a : List[str] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict ):
        """(create_and_check_for_masked_lm) MLM logits are (batch, seq_len, vocab).
        NOTE(review): mangled duplicate ``__a`` parameters (SyntaxError)."""
        UpperCAmelCase_ = NezhaForMaskedLM(config=__a )
        model.to(__a )
        model.eval()
        UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _lowercase (self : Dict , __a : List[str] , __a : Tuple , __a : List[str] , __a : Union[str, Any] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ):
        """(create_and_check_for_next_sequence_prediction) NSP logits are (batch, 2).
        NOTE(review): mangled duplicate ``__a`` parameters (SyntaxError)."""
        UpperCAmelCase_ = NezhaForNextSentencePrediction(config=__a )
        model.to(__a )
        model.eval()
        UpperCAmelCase_ = model(
            __a , attention_mask=__a , token_type_ids=__a , labels=__a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def _lowercase (self : List[Any] , __a : List[Any] , __a : str , __a : Union[str, Any] , __a : Tuple , __a : Optional[Any] , __a : List[str] , __a : Optional[Any] ):
        """(create_and_check_for_pretraining) MLM + NSP head shape checks.
        NOTE(review): mangled duplicate ``__a`` parameters (SyntaxError)."""
        UpperCAmelCase_ = NezhaForPreTraining(config=__a )
        model.to(__a )
        model.eval()
        UpperCAmelCase_ = model(
            __a , attention_mask=__a , token_type_ids=__a , labels=__a , next_sentence_label=__a , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def _lowercase (self : Dict , __a : Optional[int] , __a : List[Any] , __a : List[str] , __a : Optional[int] , __a : Union[str, Any] , __a : Optional[int] , __a : Tuple ):
        """(create_and_check_for_question_answering) start/end logits are (batch, seq_len).
        NOTE(review): mangled duplicate ``__a`` parameters (SyntaxError)."""
        UpperCAmelCase_ = NezhaForQuestionAnswering(config=__a )
        model.to(__a )
        model.eval()
        UpperCAmelCase_ = model(
            __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _lowercase (self : Optional[Any] , __a : Any , __a : Optional[int] , __a : Dict , __a : List[str] , __a : Optional[Any] , __a : Any , __a : Dict ):
        """(create_and_check_for_sequence_classification) logits are (batch, num_labels).
        NOTE(review): mangled duplicate ``__a`` parameters (SyntaxError)."""
        UpperCAmelCase_ = self.num_labels
        UpperCAmelCase_ = NezhaForSequenceClassification(__a )
        model.to(__a )
        model.eval()
        UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _lowercase (self : Tuple , __a : Tuple , __a : Any , __a : Dict , __a : Union[str, Any] , __a : Dict , __a : Dict , __a : int ):
        """(create_and_check_for_token_classification) logits are (batch, seq_len, num_labels).
        NOTE(review): mangled duplicate ``__a`` parameters (SyntaxError)."""
        UpperCAmelCase_ = self.num_labels
        UpperCAmelCase_ = NezhaForTokenClassification(config=__a )
        model.to(__a )
        model.eval()
        UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def _lowercase (self : List[Any] , __a : Union[str, Any] , __a : str , __a : List[Any] , __a : Optional[Any] , __a : Union[str, Any] , __a : int , __a : Tuple ):
        """(create_and_check_for_multiple_choice) Expand inputs to
        (batch, num_choices, seq_len) and assert choice logits shape.
        NOTE(review): mangled duplicate ``__a`` parameters (SyntaxError); the
        expanded tensors are each bound to ``UpperCAmelCase_`` and never used
        by those names — upstream they fed the model call."""
        UpperCAmelCase_ = self.num_choices
        UpperCAmelCase_ = NezhaForMultipleChoice(config=__a )
        model.to(__a )
        model.eval()
        UpperCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCAmelCase_ = model(
            __a , attention_mask=__a , token_type_ids=__a , labels=__a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def _lowercase (self : Tuple ):
        """(prepare_config_and_inputs_for_common) Return (config, inputs_dict)
        for the shared model tests. NOTE(review): unpack targets are mangled,
        so ``input_ids``/``token_type_ids``/``input_mask`` below are unbound."""
        UpperCAmelCase_ = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) ,
        ) = config_and_inputs
        UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    """Common test-suite class for the Nezha models (mangled dump).

    NOTE(review): the obfuscation collapsed every method name to ``_lowercase``
    (later defs shadow earlier ones, so unittest would discover almost none of
    these), every local to ``UpperCAmelCase_``, and several references (``__a``,
    ``return_labels``, ``inputs_dict``) are unbound in their scope. Bodies are
    kept verbatim; confirm names against the upstream HF test file.
    """
    # Model classes and pipeline-task mapping exercised by the common tests.
    a__ : Any = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    a__ : List[Any] = (
        {
            """feature-extraction""": NezhaModel,
            """fill-mask""": NezhaForMaskedLM,
            """question-answering""": NezhaForQuestionAnswering,
            """text-classification""": NezhaForSequenceClassification,
            """token-classification""": NezhaForTokenClassification,
            """zero-shot""": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    a__ : int = True
    def _lowercase (self : Union[str, Any] , __a : Any , __a : List[Any] , __a : List[str]=False ):
        """(_prepare_for_class) Add dummy label tensors for model classes that
        require them. NOTE(review): mangled duplicate ``__a`` parameters."""
        UpperCAmelCase_ = super()._prepare_for_class(__a , __a , return_labels=__a )
        if return_labels:
            if model_class in get_values(__a ):
                UpperCAmelCase_ = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a )
                UpperCAmelCase_ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__a )
        return inputs_dict
    def _lowercase (self : Any ):
        """(setUp) Build the model tester and config tester."""
        UpperCAmelCase_ = NezhaModelTester(self )
        UpperCAmelCase_ = ConfigTester(self , config_class=__a , hidden_size=37 )
    def _lowercase (self : List[Any] ):
        """(test_config) Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def _lowercase (self : Optional[int] ):
        """(test_model)"""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )
    def _lowercase (self : Optional[Any] ):
        """(test_model_as_decoder)"""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*__a )
    def _lowercase (self : Union[str, Any] ):
        # This regression test was failing with PyTorch < 1.3
        """(test_model_as_decoder_with_default_input_mask) Re-run the decoder
        check with the input mask dropped (set to None)."""
        (
            (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) ,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        UpperCAmelCase_ = None
        self.model_tester.create_and_check_model_as_decoder(
            __a , __a , __a , __a , __a , __a , __a , __a , __a , )
    def _lowercase (self : Optional[Any] ):
        """(test_for_masked_lm)"""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__a )
    def _lowercase (self : List[Any] ):
        """(test_for_multiple_choice)"""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*__a )
    def _lowercase (self : Any ):
        """(test_for_next_sequence_prediction)"""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*__a )
    def _lowercase (self : Union[str, Any] ):
        """(test_for_pretraining)"""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*__a )
    def _lowercase (self : Tuple ):
        """(test_for_question_answering)"""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__a )
    def _lowercase (self : List[Any] ):
        """(test_for_sequence_classification)"""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__a )
    def _lowercase (self : Tuple ):
        """(test_for_token_classification)"""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__a )
    @slow
    def _lowercase (self : Optional[int] ):
        """(test_model_from_pretrained) Smoke-load the first published checkpoint."""
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ = NezhaModel.from_pretrained(__a )
            self.assertIsNotNone(__a )
    @slow
    @require_torch_gpu
    def _lowercase (self : int ):
        """(test_torchscript_device_change) Trace on CPU, save, and reload onto
        another device via ``map_location``."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            UpperCAmelCase_ = True
            UpperCAmelCase_ = model_class(config=__a )
            UpperCAmelCase_ = self._prepare_for_class(__a , __a )
            UpperCAmelCase_ = torch.jit.trace(
                __a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(__a , os.path.join(__a , "bert.pt" ) )
                UpperCAmelCase_ = torch.jit.load(os.path.join(__a , "bert.pt" ) , map_location=__a )
                loaded(inputs_dict["input_ids"].to(__a ) , inputs_dict["attention_mask"].to(__a ) )
@require_torch
class __A ( unittest.TestCase ):
    """Slow integration tests against the public ``sijunhe/nezha-cn-base``
    checkpoint, comparing a 3x3 output slice to recorded reference values.

    NOTE(review): mangled dump — both methods share the name ``_lowercase``
    (the second shadows the first) and locals like ``model``/``output`` are
    read without ever being bound; confirm upstream.
    """
    @slow
    def _lowercase (self : Optional[int] ):
        """Base-model hidden states match the recorded slice to 1e-4."""
        UpperCAmelCase_ = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
        UpperCAmelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        UpperCAmelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            UpperCAmelCase_ = model(__a , attention_mask=__a )[0]
        UpperCAmelCase_ = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , __a )
        UpperCAmelCase_ = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) )
    @slow
    def _lowercase (self : Tuple ):
        """Masked-LM logits match the recorded slice to 1e-4."""
        UpperCAmelCase_ = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
        UpperCAmelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            UpperCAmelCase_ = model(__a , attention_mask=__a )[0]
        UpperCAmelCase_ = torch.Size((1, 6, 21128) )
        self.assertEqual(output.shape , __a )
        UpperCAmelCase_ = torch.tensor(
            [[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) )
| 1 | '''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
SCREAMING_SNAKE_CASE_: Optional[Any] ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
SCREAMING_SNAKE_CASE_: Union[str, Any] ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
SCREAMING_SNAKE_CASE_: List[Any] =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    """Accuracy metric for the MATH competition dataset.

    Canonicalizes LaTeX answers with ``math_equivalence.is_equiv`` and reports
    plain accuracy over (prediction, reference) string pairs.

    Fixes to the mangled original: the two hook methods both had the name
    ``_lowercase`` (so ``datasets.Metric`` never saw ``_info``/``_compute``),
    the compute method repeated the parameter name ``__a`` (a SyntaxError),
    and its ``n_correct``/``accuracy`` locals were never bound.
    """

    def _info(self : Optional[Any] ):
        """Describe inputs (two string features) and the metric homepage."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" ),
                    "references": datasets.Value("string" ),
                } ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )

    def _compute(self : Tuple , predictions : Optional[int] , references : List[Any] ):
        """Return ``{"accuracy": ...}`` over canonicalized answer pairs."""
        n_correct = 0.0
        for pred, ref in zip(predictions , references ):
            # is_equiv canonicalizes both strings (e.g. "1/2" vs "\frac{1}{2}").
            n_correct += 1.0 if math_equivalence.is_equiv(pred , ref ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
| 1 | 1 |
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE_: Optional[Any] ='scheduler_config.json'
class __A ( UpperCamelCase__ ):
    """Numeric registry of scheduler types.

    NOTE(review): mangling bound every member to the same name ``a__`` — only
    the last value (14) survives. Upstream this is presumably an enum with one
    distinct member per scheduler; confirm before use.
    """
    a__ : int = 1
    a__ : Union[str, Any] = 2
    a__ : Union[str, Any] = 3
    a__ : Tuple = 4
    a__ : List[Any] = 5
    a__ : Union[str, Any] = 6
    a__ : List[str] = 7
    a__ : Tuple = 8
    a__ : Optional[Any] = 9
    a__ : str = 10
    a__ : Dict = 11
    a__ : Union[str, Any] = 12
    a__ : List[Any] = 13
    a__ : str = 14
@dataclass
class __A ( UpperCamelCase__ ):
    """Base scheduler output holding one tensor field (name mangled to ``a__``;
    upstream presumably ``prev_sample`` — confirm)."""
    a__ : torch.FloatTensor
class __A :
    """Config-backed base mixin for schedulers: save/load via a JSON config
    plus a registry of compatible scheduler classes.

    NOTE(review): mangled dump — the methods all share the name ``_lowercase``
    (later defs shadow earlier ones), and the first classmethod repeats the
    parameter name ``__a``, which is a SyntaxError; confirm upstream names
    (from_pretrained / save_pretrained / compatibles / _get_compatibles).
    """
    # Class-level settings, all bound to one mangled name ``a__``:
    # config filename, compatible-class names, and a boolean flag.
    a__ : Optional[int] = SCHEDULER_CONFIG_NAME
    a__ : int = []
    a__ : List[str] = True
    @classmethod
    def _lowercase (cls : Dict , __a : Dict[str, Any] = None , __a : Optional[str] = None , __a : List[str]=False , **__a : Any , ):
        """(from_pretrained) Load the scheduler config and build an instance from it."""
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = cls.load_config(
            pretrained_model_name_or_path=__a , subfolder=__a , return_unused_kwargs=__a , return_commit_hash=__a , **__a , )
        return cls.from_config(__a , return_unused_kwargs=__a , **__a )
    def _lowercase (self : Any , __a : Union[str, os.PathLike] , __a : bool = False , **__a : Optional[Any] ):
        """(save_pretrained) Write the scheduler config to the given directory."""
        self.save_config(save_directory=__a , push_to_hub=__a , **__a )
    @property
    def _lowercase (self : Any ):
        """(compatibles) Scheduler classes compatible with this one."""
        return self._get_compatibles()
    @classmethod
    def _lowercase (cls : Optional[Any] ):
        """(_get_compatibles) Resolve compatible-class names (plus this class)
        to attributes of the top-level package, skipping missing ones."""
        UpperCAmelCase_ = list(set([cls.__name__] + cls._compatibles ) )
        UpperCAmelCase_ = importlib.import_module(__name__.split("." )[0] )
        UpperCAmelCase_ = [
            getattr(__a , __a ) for c in compatible_classes_str if hasattr(__a , __a )
        ]
        return compatible_classes
| 1 | '''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ) -> List[Any]:
    """(check_model_parameters) Assert per-parameter gradient (de)synchronisation
    between two models: grads must differ before a sync step and match after.

    NOTE(review): mangled — all four parameters are named ``snake_case_`` (a
    SyntaxError) while the body reads ``model_a``/``model_b``/``did_step``/
    ``iteration``, the presumed original names; confirm upstream.
    """
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str=True ) -> Optional[Any]:
    """(step_model) Forward + backward one batch with MSE loss; when bypassing
    ``accelerator.backward`` the loss is scaled by the accumulation steps
    manually. NOTE(review): mangled duplicate ``snake_case_`` parameters
    (SyntaxError); body reads model/input/target/accelerator/do_backward."""
    model.train()
    UpperCAmelCase_ = model(snake_case_ )
    UpperCAmelCase_ = F.mse_loss(snake_case_ , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Any=False ) -> Dict:
    """(get_training_setup) Build a seeded regression model, a deep copy for the
    accelerator-prepared twin, a dataloader, and (optionally, when ``sched``)
    paired AdamW optimizers and LambdaLR schedulers.

    NOTE(review): mangled — parameters and locals lost their names; the bodies
    read ``model``/``ddp_model``/``sched``/``accelerator`` etc., the presumed
    originals; confirm upstream.
    """
    set_seed(42 )
    UpperCAmelCase_ = RegressionModel()
    UpperCAmelCase_ = deepcopy(snake_case_ )
    UpperCAmelCase_ = RegressionDataset(length=80 )
    UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        UpperCAmelCase_ = AdamW(params=model.parameters() , lr=1E-3 )
        UpperCAmelCase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
        UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
    else:
        UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def lowerCAmelCase_ ( snake_case_ : Any ) -> int:
    """(test_noop_sync) With a no-op ``accelerator.no_sync``, gradients of the
    base model and the prepared model must always match over three iterations.
    NOTE(review): mangled names — see the notes on the helpers above."""
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
    # Use a single batch
    UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
        UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(snake_case_ ):
                step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        else:
            # Sync grads
            step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
def lowerCAmelCase_ ( snake_case_ : Tuple ) -> str:
    """(test_distributed_sync) With a real ``no_sync``, gradients must diverge
    on accumulate-only iterations and match on sync iterations.
    NOTE(review): mangled names — see the notes on the helpers above."""
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
    # Use a single batch
    UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
        UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(snake_case_ ):
                step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        else:
            # Sync grads
            step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
def lowerCAmelCase_ ( snake_case_ : Optional[int]=False , snake_case_ : str=False ) -> List[str]:
    """(test_gradient_accumulation) Under ``accelerator.accumulate`` with
    ``gradient_accumulation_steps=2``, grads must only sync every other
    iteration (and on the final batch).
    NOTE(review): mangled names — see the notes on the helpers above."""
    UpperCAmelCase_ = Accelerator(
        split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
    for iteration, batch in enumerate(snake_case_ ):
        UpperCAmelCase_ , UpperCAmelCase_ = batch.values()
        # Gather the distributed inputs and targs for the base model
        UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
        UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(snake_case_ ):
            step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
    GradientState._reset_state()
def lowerCAmelCase_ ( snake_case_ : Optional[Any]=False , snake_case_ : Tuple=False ) -> Union[str, Any]:
    """(test_gradient_accumulation_with_opt_and_scheduler) Same as the previous
    check but also steps paired optimizers/schedulers and asserts the learning
    rates stay aligned between the base and prepared runs.
    NOTE(review): mangled names — see the notes on the helpers above."""
    UpperCAmelCase_ = Accelerator(
        split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ , snake_case_ )
    for iteration, batch in enumerate(snake_case_ ):
        UpperCAmelCase_ , UpperCAmelCase_ = batch.values()
        # Gather the distributed inputs and targs for the base model
        UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
        UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(snake_case_ ):
            step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
        UpperCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ ))
        if accelerator.num_processes > 1:
            check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
    GradientState._reset_state()
def lowerCAmelCase_ ( ) -> List[Any]:
    """(test_dataloader_break) ``GradientState`` must track the active
    dataloader through nested iteration (including an inner loop over a second
    dataloader) and flag/clear end-of-dataloader correctly.
    NOTE(review): mangled — the prepared dataloaders lose their names, so the
    loop targets read the presumed originals; confirm upstream."""
    UpperCAmelCase_ = Accelerator()
    UpperCAmelCase_ = RegressionDataset(length=80 )
    UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
    UpperCAmelCase_ = RegressionDataset(length=96 )
    UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
    UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(snake_case_ ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
        if iteration < len(snake_case_ ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(snake_case_ ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
                    if batch_num < len(snake_case_ ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def lowerCAmelCase_ ( ) -> str:
    """(main) Dispatch the gradient-sync/accumulation checks appropriate to the
    current distributed configuration (NO / MULTI_CPU / MULTI_GPU), printing a
    banner from local process 0 before each.
    NOTE(review): mangled — it calls the test helpers by their original names
    (``test_dataloader_break`` etc.), which the mangled defs above (all named
    ``lowerCAmelCase_``) never bind; confirm upstream."""
    UpperCAmelCase_ = Accelerator()
    UpperCAmelCase_ = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(snake_case_ )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(snake_case_ )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation(snake_case_ , snake_case_ )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Dict ) -> int:
    """(_mp_fn) Multiprocessing spawn entry point: the process-index argument
    is ignored and ``main()`` runs on each process.
    NOTE(review): ``main`` is the original name — the mangled defs above never
    bind it; confirm upstream."""
    main()
if __name__ == "__main__":
    main()
| 1 | 1 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
# Fixture paths used by the tests below.
# NOTE(review): mangled — all three constants share one name, so only the last
# binding survives; upstream they are three distinct SAMPLE_* path constants.
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_tests_dir('fixtures')
SCREAMING_SNAKE_CASE_: List[Any] =get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_tests_dir('fixtures/dummy-config.json')
class __A ( unittest.TestCase ):
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = 0
def _lowercase (self : Any ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(__a , __a )
def _lowercase (self : Any ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(__a ).to_dict()
config_dict.pop("feature_extractor_type" )
UpperCAmelCase_ = WavaVecaFeatureExtractor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
UpperCAmelCase_ = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def _lowercase (self : Dict ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Dict ):
with self.assertRaisesRegex(
__a , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained("bert-base" )
def _lowercase (self : List[str] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" )
def _lowercase (self : List[str] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def _lowercase (self : int ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def _lowercase (self : Union[str, Any] ):
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoFeatureExtractor.register(__a , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase_ = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _lowercase (self : List[Any] ):
    """A locally registered extractor must win over Hub remote code unless the
    caller explicitly opts in with ``trust_remote_code=True``."""
    class __A ( UpperCamelCase__ ):
        # Marker attribute: lets the assertions below distinguish the local
        # class from the remote one (which lacks it).
        a__ : Optional[Any] = True
    try:
        AutoConfig.register("custom" , __a )
        AutoFeatureExtractor.register(__a , __a )
        # If remote code is not set, the default is to use local
        UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor" )
        self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
        self.assertTrue(feature_extractor.is_local )
        # If remote code is disabled, we load the local one.
        UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
        self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
        self.assertTrue(feature_extractor.is_local )
        # If remote is enabled, we load from the Hub
        UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
        self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
        self.assertTrue(not hasattr(__a , "is_local" ) )
    finally:
        # Clean up the registries regardless of how the assertions fared.
        if "custom" in CONFIG_MAPPING._extra_content:
            del CONFIG_MAPPING._extra_content["custom"]
        if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
            del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 1 | '''simple docstring'''
def lowerCAmelCase_ ( x : int , y : int ) -> int:
    """Return the greatest common divisor of ``x`` and ``y``.

    The original declaration repeated the parameter name (a SyntaxError) and
    recursed through the undefined name ``greatest_common_divisor``; both are
    fixed here with an iterative Euclidean algorithm.

    :param x: first integer
    :param y: second integer
    :return: gcd(x, y); gcd(n, 0) == n by convention
    """
    while y:
        x, y = y, x % y
    return x
def lowerCAmelCase_ ( x : int , y : int ) -> int:
    """Return the least common multiple of ``x`` and ``y``.

    The original signature repeated the parameter name (a SyntaxError) and
    called the undefined ``greatest_common_divisor``; this version is
    self-contained via :func:`math.gcd`.

    :param x: first positive integer
    :param y: second positive integer
    :return: lcm(x, y)
    """
    from math import gcd  # local import keeps the function self-contained
    return (x * y) // gcd(x, y)
def lowerCAmelCase_ ( snake_case_ : int = 20 ) -> int:
    """Return the smallest positive number evenly divisible by all of 1..n
    (Project Euler problem 5).

    The original accumulated via an undefined ``lcm`` helper called with the
    wrong arguments; the running lcm is now computed inline with
    :func:`math.gcd`.

    :param snake_case_: upper bound n of the divisor range (default 20)
    :return: lcm(1, 2, ..., n)
    """
    from math import gcd
    result = 1
    for i in range(1 , snake_case_ + 1 ):
        # lcm(result, i) = result * i / gcd(result, i)
        result = result * i // gcd(result , i )
    return result
if __name__ == "__main__":
    # The original referenced the undefined name ``solution``; the solver in
    # this module is the last binding of ``lowerCAmelCase_``.
    print(f"{lowerCAmelCase_() = }")
| 1 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger and the map of released ConvNeXtV2 checkpoints to
# their hosted config files.
# NOTE(review): both bindings share the name ``SCREAMING_SNAKE_CASE_`` (an
# automated rename), so the second assignment clobbers the logger — the
# original names need restoring.
SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] ={
    'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
    """Configuration for a ConvNeXtV2 model.

    Fixes two defects introduced by an automated rename: every ``__init__``
    parameter was named ``__a`` (a SyntaxError) and the values were bound to
    throwaway locals instead of ``self``, so the later read of
    ``self.depths`` raised AttributeError.

    NOTE(review): the two base classes are the same undefined name
    ``UpperCamelCase__`` — originally distinct mixins (config + backbone);
    restore them before use.
    """

    # Model-type identifier used by the auto-config machinery.
    a__ : str = """convnextv2"""

    def __init__(self , num_channels=3 , num_stages=4 , patch_size=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-12 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **kwargs , ):
        """Store the architecture hyper-parameters; unknown kwargs go to the base config."""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Defaults mirror the original literals when the caller passes None.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        # "stem" plus one named entry per stage, e.g. stage1..stage4.
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 1 | '''simple docstring'''
import os
from math import logaa
def lowerCAmelCase_ ( snake_case_ : str = "base_exp.txt" ) -> int:
    """Return the 1-based line number of the ``base,exponent`` pair in the
    given file whose value ``base**exponent`` is largest (Project Euler 99).

    Comparisons use ``exponent * log10(base)`` so the huge powers are never
    materialised.  Fixes the mangled original, which imported the
    non-existent ``math.logaa``, read the undefined name ``largest`` and
    mapped the *filename* over the split line instead of ``int``.

    :param snake_case_: data file of comma-separated ``base,exponent`` lines;
        a relative path is resolved against this module's directory.
    :return: 1-based index of the winning line (0 for an empty file)
    """
    from math import log10
    largest = 0.0
    result = 0
    # os.path.join returns the second argument unchanged when it is absolute.
    path = os.path.join(os.path.dirname(os.path.realpath(__file__ ) ) , snake_case_ )
    with open(path ) as handle:
        for i, line in enumerate(handle ):
            a, x = (int(part ) for part in line.split("," ))
            if x * log10(a ) > largest:
                largest = x * log10(a )
                result = i + 1
    return result
if __name__ == "__main__":
    # ``solution`` was never defined in this module; call the solver above,
    # which the rename pass left bound to ``lowerCAmelCase_``.
    print(lowerCAmelCase_())
| 1 | 1 |
'''simple docstring'''
import os
from math import logaa
def lowerCAmelCase_ ( snake_case_ : str = "base_exp.txt" ) -> int:
    """Return the 1-based line number of the ``base,exponent`` pair in the
    given file whose value ``base**exponent`` is largest (Project Euler 99).

    Comparisons use ``exponent * log10(base)`` so the huge powers are never
    materialised.  Fixes the mangled original, which imported the
    non-existent ``math.logaa``, read the undefined name ``largest`` and
    mapped the *filename* over the split line instead of ``int``.

    :param snake_case_: data file of comma-separated ``base,exponent`` lines;
        a relative path is resolved against this module's directory.
    :return: 1-based index of the winning line (0 for an empty file)
    """
    from math import log10
    largest = 0.0
    result = 0
    # os.path.join returns the second argument unchanged when it is absolute.
    path = os.path.join(os.path.dirname(os.path.realpath(__file__ ) ) , snake_case_ )
    with open(path ) as handle:
        for i, line in enumerate(handle ):
            a, x = (int(part ) for part in line.split("," ))
            if x * log10(a ) > largest:
                largest = x * log10(a )
                result = i + 1
    return result
if __name__ == "__main__":
    # ``solution`` was never defined in this module; call the solver above,
    # which the rename pass left bound to ``lowerCAmelCase_``.
    print(lowerCAmelCase_())
| 1 | '''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : int ) -> Optional[int]:
    """Remap a CompVis LDM VAE state dict onto the diffusers ``AutoencoderKL`` layout.

    NOTE(review): an automated rename collapsed the locals to
    ``UpperCAmelCase_`` and duplicated the ``snake_case_`` parameter (a
    SyntaxError).  Later reads of ``checkpoint``/``vae_state_dict``/
    ``down_blocks``/``up_blocks``/``num_up_blocks``/``mid_resnets``/
    ``new_checkpoint`` are therefore unresolved as written — the distinct
    original names (checkpoint dict in, config dict in, new dict out) must
    be restored before this can run.
    """
    UpperCAmelCase_ = checkpoint
    UpperCAmelCase_ = {}
    # Stem/head convolutions and norms map one-to-one between the layouts.
    UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
    UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
    UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
    UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
    UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
    UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
    UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
    UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
    UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
    UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
    UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
    UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
    UpperCAmelCase_ = vae_state_dict["quant_conv.weight"]
    UpperCAmelCase_ = vae_state_dict["quant_conv.bias"]
    UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
    UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
    UpperCAmelCase_ = {
        layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ )
    }
    # Retrieves the keys for the decoder up blocks only
    UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
    UpperCAmelCase_ = {
        layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ )
    }
    # Encoder down blocks: move downsamplers explicitly, rewrite resnet paths.
    for i in range(snake_case_ ):
        UpperCAmelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
        if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
            UpperCAmelCase_ = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.weight""" )
            UpperCAmelCase_ = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.bias""" )
        UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
        UpperCAmelCase_ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
        assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
    # Encoder mid block: two resnets followed by one attention layer.
    UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
    UpperCAmelCase_ = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        UpperCAmelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
        UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
        UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
    UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ )
    UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
    conv_attn_to_linear(snake_case_ )
    # Decoder up blocks: note the index reversal (LDM counts bottom-up).
    for i in range(snake_case_ ):
        UpperCAmelCase_ = num_up_blocks - 1 - i
        UpperCAmelCase_ = [
            key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
        ]
        if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
            UpperCAmelCase_ = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.weight"""
            ]
            UpperCAmelCase_ = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.bias"""
            ]
        UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
        UpperCAmelCase_ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
        assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
    # Decoder mid block: same structure as the encoder's.
    UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
    UpperCAmelCase_ = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        UpperCAmelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
        UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
        UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
    UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ )
    UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
    conv_attn_to_linear(snake_case_ )
    return new_checkpoint
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str , ) -> Dict:
    """Load a Stable Diffusion VAE checkpoint (.pt or .safetensors), convert it
    with the v1 inference config and save a diffusers ``AutoencoderKL``.

    NOTE(review): both parameters were renamed to the same ``snake_case_``
    (a SyntaxError), and the body reads ``r``/``checkpoint_path``/``vae``
    that no longer exist — the original names (checkpoint path in, dump
    path out) must be restored before this can run.
    """
    # Fetch the reference v1 inference YAML to build the model config from.
    UpperCAmelCase_ = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
    UpperCAmelCase_ = io.BytesIO(r.content )
    UpperCAmelCase_ = OmegaConf.load(snake_case_ )
    UpperCAmelCase_ = 5_12
    UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors" ):
        # safetensors checkpoints are read tensor-by-tensor on CPU.
        from safetensors import safe_open
        UpperCAmelCase_ = {}
        with safe_open(snake_case_ , framework="pt" , device="cpu" ) as f:
            for key in f.keys():
                UpperCAmelCase_ = f.get_tensor(snake_case_ )
    else:
        UpperCAmelCase_ = torch.load(snake_case_ , map_location=snake_case_ )["state_dict"]
    # Convert the VAE model.
    UpperCAmelCase_ = create_vae_diffusers_config(snake_case_ , image_size=snake_case_ )
    UpperCAmelCase_ = custom_convert_ldm_vae_checkpoint(snake_case_ , snake_case_ )
    UpperCAmelCase_ = AutoencoderKL(**snake_case_ )
    vae.load_state_dict(snake_case_ )
    vae.save_pretrained(snake_case_ )
if __name__ == "__main__":
    # CLI entry point: convert a VAE .pt/.safetensors checkpoint to the
    # diffusers format.  The original referenced the undefined names
    # ``parser``/``args``/``vae_pt_to_vae_diffuser`` left over from an
    # automated rename; the converter above is the last binding of
    # ``lowerCAmelCase_`` in this module.
    parser = argparse.ArgumentParser()
    parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    args = parser.parse_args()
    lowerCAmelCase_(args.vae_pt_path, args.dump_path)
| 1 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __A ( unittest.TestCase ):
    """Builds small RoFormer configs and dummy inputs for the Flax model tests.

    NOTE(review): every ``__init__`` parameter is named ``__a`` (a
    SyntaxError) and the values are bound to the throwaway local
    ``UpperCAmelCase_`` instead of ``self`` — the original attribute names
    (visible in the later ``self.*`` reads) must be restored.
    """
    def __init__(self : List[str] , __a : Union[str, Any] , __a : List[str]=13 , __a : List[Any]=7 , __a : Union[str, Any]=True , __a : Any=True , __a : List[Any]=True , __a : str=True , __a : List[Any]=99 , __a : Any=32 , __a : Tuple=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : int=0.1 , __a : str=0.1 , __a : List[Any]=512 , __a : Optional[int]=16 , __a : Tuple=2 , __a : Any=0.02 , __a : Union[str, Any]=4 , ):
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = seq_length
        UpperCAmelCase_ = is_training
        UpperCAmelCase_ = use_attention_mask
        UpperCAmelCase_ = use_token_type_ids
        UpperCAmelCase_ = use_labels
        UpperCAmelCase_ = vocab_size
        UpperCAmelCase_ = hidden_size
        UpperCAmelCase_ = num_hidden_layers
        UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = intermediate_size
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = hidden_dropout_prob
        UpperCAmelCase_ = attention_probs_dropout_prob
        UpperCAmelCase_ = max_position_embeddings
        UpperCAmelCase_ = type_vocab_size
        UpperCAmelCase_ = type_sequence_label_size
        UpperCAmelCase_ = initializer_range
        UpperCAmelCase_ = num_choices

    def _lowercase (self : int ):
        """Create random ids/masks plus a matching small RoFormerConfig."""
        UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase_ = None
        if self.use_attention_mask:
            UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase_ = None
        if self.use_token_type_ids:
            UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCAmelCase_ = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def _lowercase (self : List[Any] ):
        """Same as above but packaged as (config, kwargs-dict) for the common tests."""
        UpperCAmelCase_ = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
        UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class __A ( UpperCamelCase__ , unittest.TestCase ):
    """Runs the shared Flax model-test suite against all RoFormer heads.

    NOTE(review): both class attributes were renamed to ``a__`` so the
    second shadows the first, and ``FlaxRoFormerModelTester`` below refers
    to the tester class above, itself renamed to ``__A``.
    """
    a__ : Dict = True

    a__ : Dict = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _lowercase (self : Dict ):
        # Per-test fixture: fresh tester instance.
        UpperCAmelCase_ = FlaxRoFormerModelTester(self )

    @slow
    def _lowercase (self : Optional[Any] ):
        """Smoke test: every head loads from the Hub and runs a forward pass."""
        for model_class_name in self.all_model_classes:
            UpperCAmelCase_ = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=__a )
            UpperCAmelCase_ = model(np.ones((1, 1) ) )
            self.assertIsNotNone(__a )
@require_flax
class __A ( unittest.TestCase ):
    """Integration test pinning reference logits for the Chinese-base checkpoint."""
    @slow
    def _lowercase (self : List[Any] ):
        UpperCAmelCase_ = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        UpperCAmelCase_ = jnp.array([[0, 1, 2, 3, 4, 5]] )
        UpperCAmelCase_ = model(__a )[0]
        UpperCAmelCase_ = 50000
        UpperCAmelCase_ = (1, 6, vocab_size)
        self.assertEqual(output.shape , __a )
        # Golden values recorded from a reference run of the checkpoint.
        UpperCAmelCase_ = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
| 1 | '''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __A ( unittest.TestCase ):
    """Builds small ViT configs and dummy pixel inputs for the Flax model tests.

    NOTE(review): every ``__init__`` parameter is named ``__a`` (a
    SyntaxError) and values are bound to the local ``UpperCAmelCase_``
    instead of ``self`` — restore the attribute names the later
    ``self.*`` reads expect.
    """
    def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ):
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = image_size
        UpperCAmelCase_ = patch_size
        UpperCAmelCase_ = num_channels
        UpperCAmelCase_ = is_training
        UpperCAmelCase_ = use_labels
        UpperCAmelCase_ = hidden_size
        UpperCAmelCase_ = num_hidden_layers
        UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = intermediate_size
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = hidden_dropout_prob
        UpperCAmelCase_ = attention_probs_dropout_prob
        UpperCAmelCase_ = type_sequence_label_size
        UpperCAmelCase_ = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        UpperCAmelCase_ = (image_size // patch_size) ** 2
        UpperCAmelCase_ = num_patches + 1

    def _lowercase (self : Any ):
        """Random pixel batch plus a matching small ViTConfig."""
        UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase_ = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , )
        return config, pixel_values

    def _lowercase (self : Dict , __a : Any , __a : List[Any] ):
        """Forward the bare model and check the hidden-state shape."""
        UpperCAmelCase_ = FlaxViTModel(config=__a )
        UpperCAmelCase_ = model(__a )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        UpperCAmelCase_ = (self.image_size, self.image_size)
        UpperCAmelCase_ = (self.patch_size, self.patch_size)
        UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )

    def _lowercase (self : Tuple , __a : str , __a : Any ):
        """Forward the classification head; also exercises greyscale input."""
        UpperCAmelCase_ = self.type_sequence_label_size
        UpperCAmelCase_ = FlaxViTForImageClassification(config=__a )
        UpperCAmelCase_ = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = FlaxViTForImageClassification(__a )
        UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase_ = model(__a )

    def _lowercase (self : Optional[Any] ):
        """Package config + pixel values as (config, kwargs-dict)."""
        UpperCAmelCase_ = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) ,
        ) = config_and_inputs
        UpperCAmelCase_ = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class __A ( UpperCamelCase__ , unittest.TestCase ):
    """Runs the shared Flax suite plus JIT/signature checks for ViT models.

    NOTE(review): ``FlaxViTModelTester`` refers to the tester class above,
    which the rename pass turned into ``__A``; ``model_jitted`` also
    declares ``__a`` twice (a SyntaxError).
    """
    a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def _lowercase (self : Any ):
        # Per-test fixtures: model tester and common config tester.
        UpperCAmelCase_ = FlaxViTModelTester(self )
        UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )

    def _lowercase (self : Tuple ):
        self.config_tester.run_common_tests()

    def _lowercase (self : str ):
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )

    def _lowercase (self : str ):
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__a )

    def _lowercase (self : Tuple ):
        """Every model's __call__ must take pixel_values as its first argument."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ = model_class(__a )
            UpperCAmelCase_ = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ = [*signature.parameters.keys()]
            UpperCAmelCase_ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __a )

    def _lowercase (self : Optional[Any] ):
        """Jitted and eager forward passes must produce identically-shaped outputs."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase_ = self._prepare_for_class(__a , __a )
                UpperCAmelCase_ = model_class(__a )

                @jax.jit
                def model_jitted(__a : Tuple , **__a : List[Any] ):
                    return model(pixel_values=__a , **__a )

                with self.subTest("JIT Enabled" ):
                    UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
                self.assertEqual(len(__a ) , len(__a ) )
                for jitted_output, output in zip(__a , __a ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def _lowercase (self : Tuple ):
        """Smoke test: the released checkpoint loads and runs a forward pass."""
        for model_class_name in self.all_model_classes:
            UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" )
            UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(__a )
| 1 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : list ) -> list:
    """Stable counting sort of a list of integers.

    Fixes the mangled original, whose body read the undefined names
    ``collection``/``coll_len``/``coll_max``/``coll_min``/``ordered`` and
    assigned the prefix sums and placements to a throwaway local.

    :param snake_case_: integers to sort (may be empty, may be negative);
        the input list is left untouched
    :return: a new list with the same elements in ascending order
    """
    if not snake_case_:
        return []
    coll_len = len(snake_case_ )
    coll_min = min(snake_case_ )
    coll_max = max(snake_case_ )
    # One counter slot per distinct possible value in [coll_min, coll_max].
    counts = [0] * (coll_max + 1 - coll_min)
    for number in snake_case_:
        counts[number - coll_min] += 1
    # Prefix sums: counts[i] becomes the number of elements <= coll_min + i.
    for i in range(1 , len(counts ) ):
        counts[i] += counts[i - 1]
    ordered = [0] * coll_len
    # Walk the input backwards so equal keys keep their relative order
    # (this is what makes the sort stable).
    for number in reversed(snake_case_ ):
        counts[number - coll_min] -= 1
        ordered[counts[number - coll_min]] = number
    return ordered
def lowerCAmelCase_ ( snake_case_ : str ) -> str:
    """Return ``snake_case_`` with its characters sorted by code point.

    The original delegated to an undefined ``counting_sort`` over the
    characters' ordinals and read the undefined name ``string``;
    :func:`sorted` produces the identical code-point ordering directly.
    """
    return "".join(sorted(snake_case_ ) )
if __name__ == "__main__":
    # Test string sort
    # NOTE(review): ``counting_sort_string``, ``counting_sort``,
    # ``user_input`` and ``unsorted`` are undefined here — the definitions
    # above were renamed to ``lowerCAmelCase_``/``SCREAMING_SNAKE_CASE_``
    # by an automated pass, so this entry point raises NameError as written.
    assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
    SCREAMING_SNAKE_CASE_: List[Any] =input('Enter numbers separated by a comma:\n').strip()
    SCREAMING_SNAKE_CASE_: Union[str, Any] =[int(item) for item in user_input.split(',')]
    print(counting_sort(unsorted))
| 1 | '''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __A ( UpperCamelCase__ ):
    """Unit tests for ``RealmRetriever``: builds a tiny tokenizer vocab and a
    handful of byte-string block records on disk, then checks retrieval
    concatenation, answer-span detection and save/load round-trips.

    NOTE(review): the base class ``UpperCamelCase__`` is undefined (likely
    ``TestCase`` originally) and most locals/arguments were renamed to
    ``UpperCAmelCase_``/``__a``, so later reads such as ``vocab_tokens``,
    ``retriever`` and ``tokenizer`` are unresolved as written.
    """
    def _lowercase (self : Optional[Any] ):
        # Fixture setup: temp dir, vocab file and an (empty) block-records dir.
        UpperCAmelCase_ = tempfile.mkdtemp()
        UpperCAmelCase_ = 5
        # Realm tok
        UpperCAmelCase_ = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" )
        os.makedirs(__a , exist_ok=__a )
        UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" )
        os.makedirs(__a , exist_ok=__a )

    def _lowercase (self : Optional[Any] ):
        # Tokenizer backed by the vocab file written in setup.
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )

    def _lowercase (self : Any ):
        # Teardown: remove the temp directory tree.
        shutil.rmtree(self.tmpdirname )

    def _lowercase (self : List[Any] ):
        # Minimal config: only the block-record count matters here.
        UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records )
        return config

    def _lowercase (self : List[str] ):
        # Tiny QA dataset fixture (unused by most tests).
        UpperCAmelCase_ = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            } )
        return dataset

    def _lowercase (self : Any ):
        # Byte-string corpus the retriever indexes into.
        UpperCAmelCase_ = np.array(
            [
                B"This is the first record",
                B"This is the second record",
                B"This is the third record",
                B"This is the fourth record",
                B"This is the fifth record",
                B"This is a longer longer longer record",
            ] , dtype=__a , )
        return block_records

    def _lowercase (self : Union[str, Any] ):
        # Retriever over the dummy corpus with the fixture tokenizer.
        UpperCAmelCase_ = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def _lowercase (self : int ):
        """Retrieval should concatenate question + retrieved block per candidate."""
        UpperCAmelCase_ = self.get_config()
        UpperCAmelCase_ = self.get_dummy_retriever()
        UpperCAmelCase_ = retriever.tokenizer
        UpperCAmelCase_ = np.array([0, 3] , dtype="long" )
        UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
        UpperCAmelCase_ = tokenizer(
            ["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
        UpperCAmelCase_ = config.reader_seq_len
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
            __a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
        self.assertEqual(len(__a ) , 2 )
        self.assertEqual(len(__a ) , 2 )
        self.assertEqual(len(__a ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )

    def _lowercase (self : List[Any] ):
        """Answer spans should be located (start/end positions) per candidate block."""
        UpperCAmelCase_ = self.get_config()
        UpperCAmelCase_ = self.get_dummy_retriever()
        UpperCAmelCase_ = retriever.tokenizer
        UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" )
        UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
        UpperCAmelCase_ = tokenizer(
            ["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
        UpperCAmelCase_ = config.reader_seq_len
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
            __a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
        self.assertEqual([False, True, True] , __a )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )

    def _lowercase (self : Optional[Any] ):
        """Block records must round-trip through save_pretrained/from_pretrained,
        both from a local path and via a (mocked) Hub download."""
        UpperCAmelCase_ = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
        # Test local path
        UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
        self.assertEqual(retriever.block_records[0] , B"This is the first record" )
        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
            UpperCAmelCase_ = os.path.join(
                os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
            UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
        self.assertEqual(retriever.block_records[0] , B"This is the first record" )
| 1 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Locate the repository root and make its ``utils`` directory importable so
# the ``check_copies`` script under test can be loaded.
SCREAMING_SNAKE_CASE_: int =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
# NOTE(review): ``git_repo_path`` is undefined — the assignment above was
# renamed to ``SCREAMING_SNAKE_CASE_`` by an automated pass.
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies  # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
SCREAMING_SNAKE_CASE_: int =' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class __A ( unittest.TestCase ):
    """Tests for the ``check_copies`` consistency checker: writes candidate
    class code into a scratch diffusers tree and verifies the
    ``# Copied from`` machinery detects (and can overwrite) drift.

    NOTE(review): the method bodies read names (``comment``, ``class_name``,
    ``class_code``, ``overwrite_result``, ``code``, ``fname``) that the
    rename pass collapsed into ``__a``/``UpperCAmelCase_``; also
    ``black.TargetVersion.PYaa`` looks like a mangled ``PY37``.
    """
    def _lowercase (self : Any ):
        # Scratch diffusers tree with a copy of the real DDPM scheduler.
        UpperCAmelCase_ = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
        UpperCAmelCase_ = self.diffusers_dir
        shutil.copy(
            os.path.join(__a , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )

    def _lowercase (self : str ):
        # Restore the module-level dir and drop the scratch tree.
        UpperCAmelCase_ = "src/diffusers"
        shutil.rmtree(self.diffusers_dir )

    def _lowercase (self : Any , __a : List[str] , __a : str , __a : int , __a : Any=None ):
        """Black-format the candidate code, write it into the scratch tree and
        assert is_copy_consistent agrees (or overwrites to the expected result)."""
        UpperCAmelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            UpperCAmelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        UpperCAmelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
        UpperCAmelCase_ = black.format_str(__a , mode=__a )
        UpperCAmelCase_ = os.path.join(self.diffusers_dir , "new_code.py" )
        with open(__a , "w" , newline="\n" ) as f:
            f.write(__a )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(__a ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=__a )
            with open(__a , "r" ) as f:
                self.assertTrue(f.read() , __a )

    def _lowercase (self : List[str] ):
        # The locator must find the reference DDPMSchedulerOutput source.
        UpperCAmelCase_ = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
        self.assertEqual(__a , __a )

    def _lowercase (self : int ):
        """Exercise the copy checker on identical, renamed and drifted copies."""
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , __a , )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , __a ) , )
        # Copy consistency with a really long name
        UpperCAmelCase_ = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , __a , __a ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , __a , overwrite_result=re.sub("DDPM" , "Test" , __a ) , )
| 1 | '''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
# Operating temperature for the built-in-voltage formula.
# NOTE(review): the function below reads ``T`` — this constant was renamed
# to ``SCREAMING_SNAKE_CASE_`` by an automated pass.
SCREAMING_SNAKE_CASE_: Optional[int] =3_00  # TEMPERATURE (unit = K)
def lowerCAmelCase_ ( donor_conc : float , acceptor_conc : float , intrinsic_conc : float , ) -> float:
    """Return the built-in voltage (volts) of a p-n junction at T = 300 K,
    using ``V_bi = kT/q * ln(N_d * N_a / n_i**2)``.

    Fixes the mangled original, whose three parameters were all named
    ``snake_case_`` (a SyntaxError) while the body read the undefined names
    ``donor_conc``/``acceptor_conc``/``intrinsic_conc``/``T``.

    :param donor_conc:     donor concentration N_d (must exceed n_i)
    :param acceptor_conc:  acceptor concentration N_a (must exceed n_i)
    :param intrinsic_conc: intrinsic carrier concentration n_i (positive)
    :raises ValueError: for non-positive concentrations, or doping
        concentrations not greater than the intrinsic concentration
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive" )
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive" )
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive" )
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration" )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration" )
    else:
        # Thermal voltage kT/q (~25.85 mV at 300 K) times ln(N_d*N_a/n_i^2).
        temperature = 300  # K; module constant was renamed away, bind locally
        return (
            Boltzmann
            * temperature
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
    # Run the module's doctest examples as a lightweight self-test.
    import doctest

    doctest.testmod()
| 1 | 1 |
"""Polynomial regression demo.

Downloads the position-salaries dataset, fits a degree-4 polynomial
regression on top of a linear model, and plots the fitted curve.
"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
SCREAMING_SNAKE_CASE_: int =pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
# Column 1 is the position level (features), column 2 the salary (target).
SCREAMING_SNAKE_CASE_: Tuple =dataset.iloc[:, 1:2].values
SCREAMING_SNAKE_CASE_: int =dataset.iloc[:, 2].values


SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: str =train_test_split(X, y, test_size=0.2, random_state=0)

# Expand features to polynomial terms of degree 4, then fit an ordinary
# linear regression on the expanded features.
SCREAMING_SNAKE_CASE_: Optional[Any] =PolynomialFeatures(degree=4)
SCREAMING_SNAKE_CASE_: Any =poly_reg.fit_transform(X)
SCREAMING_SNAKE_CASE_: Optional[Any] =LinearRegression()
pol_reg.fit(X_poly, y)


def lowerCAmelCase_ ( ) -> Tuple:
    """Plot the raw data points and the fitted polynomial-regression curve.

    NOTE(review): ``snake_case_`` below is never defined in this mangled
    file — the original presumably plotted ``X`` and ``y``; confirm and
    restore the names before running.
    """
    plt.scatter(snake_case_ , snake_case_ , color="red" )
    plt.plot(snake_case_ , pol_reg.predict(poly_reg.fit_transform(snake_case_ ) ) , color="blue" )
    plt.title("Truth or Bluff (Linear Regression)" )
    plt.xlabel("Position level" )
    plt.ylabel("Salary" )
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 1 | '''simple docstring'''
import math
def lowerCAmelCase_ ( ) -> None:
    """Interactive driver: read a message, a key and a mode from stdin, then
    print the transposition-cipher encryption or decryption of the message.

    NOTE(review): this mangled file never defines ``encrypt_message`` /
    ``decrypt_message`` under those names (the defs were renamed to
    ``lowerCAmelCase_``), and ``snake_case_``/``mode``/``text`` are not all
    bound here — restore the original identifiers before running.
    """
    UpperCAmelCase_ = input("Enter message: " )
    UpperCAmelCase_ = int(input(f"""Enter key [2-{len(snake_case_ ) - 1}]: """ ) )
    UpperCAmelCase_ = input("Encryption/Decryption [e/d]: " )

    if mode.lower().startswith("e" ):
        UpperCAmelCase_ = encrypt_message(snake_case_ , snake_case_ )
    elif mode.lower().startswith("d" ):
        UpperCAmelCase_ = decrypt_message(snake_case_ , snake_case_ )

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"""Output:\n{text + "|"}""" )
def lowerCAmelCase_ ( key: int , message: str ) -> str:
    """Encrypt ``message`` with a columnar transposition cipher.

    Character ``i`` of the message goes into column ``i % key``; the
    ciphertext is the concatenation of the ``key`` columns in order.

    Args:
        key: number of columns (the cipher key).
        message: plaintext to encrypt.

    Returns:
        The transposed ciphertext string.
    """
    # Restored parameter names: the mangled signature repeated one name
    # (a SyntaxError) while the body already read ``key`` and ``message``.
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        # Collect every key-th character starting at this column.
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def lowerCAmelCase_ ( key: int , message: str ) -> str:
    """Decrypt a columnar transposition cipher produced with the same ``key``.

    Rebuilds the ``key``-row grid column by column, skipping the
    "shaded" (unused) boxes in the final column, and reads the plaintext
    back out row-major.

    Args:
        key: number of columns used during encryption.
        message: ciphertext to decrypt.

    Returns:
        The recovered plaintext string.
    """
    # Restored parameter names (mangled signature repeated one name — a
    # SyntaxError — while the body used ``key``/``message``).
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    # Boxes in the grid that hold no character.
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        # Wrap to the next row when past the last column, or when the last
        # column of this row is a shaded box.
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 1 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =None
SCREAMING_SNAKE_CASE_: List[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ={'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE_: Optional[int] ={
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
SCREAMING_SNAKE_CASE_: Union[str, Any] =['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class __A ( UpperCamelCase__ ):
    """Fast (tokenizers-backed) MBART tokenizer with language-code handling.

    Wraps a sentencepiece/tokenizers model and manages the MBART convention
    of suffixing inputs with ``</s>`` plus a language code
    (``set_src_lang_special_tokens`` / ``set_tgt_lang_special_tokens``).

    NOTE(review): identifiers in this file were mechanically mangled — every
    method is named ``_lowercase``, many parameters are ``__a`` (several
    signatures repeat the name, which is a SyntaxError), and assignments all
    target ``UpperCAmelCase_`` while bodies read the real names.  The code is
    documented as-is; restore the original names before running.
    """

    a__ : int = VOCAB_FILES_NAMES
    a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
    a__ : Optional[int] = ["""input_ids""", """attention_mask"""]
    a__ : Any = MBartTokenizer
    a__ : List[int] = []
    a__ : List[int] = []

    # Constructor: builds the mask token, registers the FAIRSEQ language
    # codes as additional special tokens, and applies the source language.
    def __init__(self : int , __a : Any=None , __a : Dict=None , __a : str="<s>" , __a : Tuple="</s>" , __a : Optional[Any]="</s>" , __a : Dict="<s>" , __a : Optional[int]="<unk>" , __a : int="<pad>" , __a : Any="<mask>" , __a : Dict=None , __a : Tuple=None , __a : Any=None , **__a : List[str] , ):
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCAmelCase_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
        super().__init__(
            vocab_file=__a , tokenizer_file=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , src_lang=__a , tgt_lang=__a , additional_special_tokens=__a , **__a , )
        UpperCAmelCase_ = vocab_file
        UpperCAmelCase_ = False if not self.vocab_file else True
        UpperCAmelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
        UpperCAmelCase_ = {
            lang_code: self.convert_tokens_to_ids(__a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        # Default to English source if none was provided.
        UpperCAmelCase_ = src_lang if src_lang is not None else "en_XX"
        UpperCAmelCase_ = self.convert_tokens_to_ids(self._src_lang )
        UpperCAmelCase_ = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    # ``src_lang`` property getter.
    @property
    def _lowercase (self : str ):
        return self._src_lang

    # ``src_lang`` setter: re-applies the source-language special tokens.
    @src_lang.setter
    def _lowercase (self : int , __a : str ):
        UpperCAmelCase_ = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    # build_inputs_with_special_tokens: prefix/suffix around one or two ids
    # sequences.
    def _lowercase (self : List[str] , __a : List[int] , __a : Optional[List[int]] = None ):
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    # create_token_type_ids_from_sequences: MBART uses all-zero segment ids.
    def _lowercase (self : Any , __a : List[int] , __a : Optional[List[int]] = None ):
        UpperCAmelCase_ = [self.sep_token_id]
        UpperCAmelCase_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    # _build_translation_inputs: tokenize raw text and attach the forced
    # target-language BOS id.
    def _lowercase (self : Any , __a : int , __a : str , __a : Optional[str] , __a : Optional[str] , **__a : List[str] ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        UpperCAmelCase_ = src_lang
        UpperCAmelCase_ = self(__a , add_special_tokens=__a , return_tensors=__a , **__a )
        UpperCAmelCase_ = self.convert_tokens_to_ids(__a )
        UpperCAmelCase_ = tgt_lang_id
        return inputs

    # prepare_seq2seq_batch: record the language pair, then defer to the base
    # implementation.
    def _lowercase (self : Union[str, Any] , __a : List[str] , __a : str = "en_XX" , __a : Optional[List[str]] = None , __a : str = "ro_RO" , **__a : int , ):
        UpperCAmelCase_ = src_lang
        UpperCAmelCase_ = tgt_lang
        return super().prepare_seqaseq_batch(__a , __a , **__a )

    # _switch_to_input_mode / _switch_to_target_mode hooks.
    def _lowercase (self : Optional[int] ):
        return self.set_src_lang_special_tokens(self.src_lang )

    def _lowercase (self : Optional[Any] ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    # set_src_lang_special_tokens: no prefix; suffix is [eos, src_lang_code].
    def _lowercase (self : int , __a : Optional[int] ):
        UpperCAmelCase_ = self.convert_tokens_to_ids(__a )
        UpperCAmelCase_ = []
        UpperCAmelCase_ = [self.eos_token_id, self.cur_lang_code]
        UpperCAmelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
        UpperCAmelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
        UpperCAmelCase_ = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    # set_tgt_lang_special_tokens: no prefix; suffix is [eos, tgt_lang_code].
    def _lowercase (self : Dict , __a : str ):
        UpperCAmelCase_ = self.convert_tokens_to_ids(__a )
        UpperCAmelCase_ = []
        UpperCAmelCase_ = [self.eos_token_id, self.cur_lang_code]
        UpperCAmelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
        UpperCAmelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
        UpperCAmelCase_ = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    # save_vocabulary: copy the sentencepiece model next to the fast files.
    def _lowercase (self : List[str] , __a : str , __a : Optional[str] = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(__a ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        UpperCAmelCase_ = os.path.join(
            __a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
            copyfile(self.vocab_file , __a )
        return (out_vocab_file,)
| 1 | '''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_: Optional[int] =logging.getLogger()
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( UpperCamelCase__ ):
    """End-to-end smoke tests for the RAG fine-tuning example script.

    Creates a tiny dummy dataset, launches ``finetune_rag.py`` in a
    subprocess, and checks the exact-match metric it writes to
    ``metrics.json``.

    NOTE: method names are restored from the in-class call sites — the
    mangled file defined every method as ``_lowercase`` (so later defs
    shadowed earlier ones) and repeated the ``__a`` parameter name, which
    is a SyntaxError.
    """

    def _create_dummy_data(self, data_dir):
        """Write 12/2/2 identical source/target lines for train/val/test."""
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"""{split}.{field}"""), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        """Run one epoch of finetune_rag.py in a subprocess; return its metrics dict."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"""--gpus={gpus}""")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            # CPU fallback: two DDP-CPU processes instead of GPUs.
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 1 | 1 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
SCREAMING_SNAKE_CASE_: Tuple =get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
SCREAMING_SNAKE_CASE_: Tuple =json.load(f)
@require_torch
class __A ( unittest.TestCase ):
    """BLEU regression tests for the four FSMT WMT19 translation pairs.

    NOTE: helper-method names are restored from the in-class call sites —
    the mangled file defined every method as ``_lowercase`` (later defs
    shadowed earlier ones) and repeated the ``__a`` parameter name, which
    is a SyntaxError.
    """

    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            # fp16 keeps the slow test within GPU memory.
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ] )
    @slow
    def _lowercase (self, pair: str, min_bleu_score: float):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 1 | '''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
SCREAMING_SNAKE_CASE_: Optional[int] =Lock()
def lowerCAmelCase_ ( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ) -> None:
    """Worker process for the parallel odd-even transposition sort.

    Each worker holds one value; on alternating phases it exchanges values
    with its left or right neighbour over pipes, keeping the smaller value
    if it sits on the left of the pair and the larger if on the right.

    Args:
        position: index of this worker in the array.
        value: the element this worker currently holds.
        l_send / r_send: pipes for sending our value to the left/right
            neighbour (``None`` at the ends of the array).
        lr_cv / rr_cv: pipes for receiving the left/right neighbour's value.
        result_pipe: pipe used to report the final value back to the parent.

    NOTE: parameter names restored — the mangled signature repeated one
    name seven times (a SyntaxError) while the body used the real names.
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Dict:
    """Parallel odd-even transposition sort: spawn one worker process per
    element, wire neighbouring workers together with pipes, and collect the
    sorted values from the result pipes.

    NOTE(review): names in this body were mangled — it reads ``arr``,
    ``result_pipe``, ``process_array_``, ``temp_ls``/``temp_rs``/``temp_lr``/
    ``temp_rr`` which are never bound (every assignment targets
    ``UpperCAmelCase_``), and ``Process(target=snake_case_, ...)`` passes the
    input list where the worker function belongs.  Restore the original
    identifiers before running.
    """
    UpperCAmelCase_ = []
    UpperCAmelCase_ = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    UpperCAmelCase_ = Pipe()
    UpperCAmelCase_ = Pipe()
    process_array_.append(
        Process(
            target=snake_case_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    UpperCAmelCase_ = temp_rs
    UpperCAmelCase_ = temp_rr

    # middle workers have neighbours on both sides
    for i in range(1 , len(snake_case_ ) - 1 ):
        UpperCAmelCase_ = Pipe()
        UpperCAmelCase_ = Pipe()
        process_array_.append(
            Process(
                target=snake_case_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        UpperCAmelCase_ = temp_rs
        UpperCAmelCase_ = temp_rr

    process_array_.append(
        Process(
            target=snake_case_ , args=(
                len(snake_case_ ) - 1,
                arr[len(snake_case_ ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(snake_case_ ) - 1],
            ) , ) )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0 , len(snake_case_ ) ):
        UpperCAmelCase_ = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def lowerCAmelCase_ ( ) -> str:
    """Demo driver: sort the reversed list 10..1 and print before/after.

    NOTE(review): mangled names — ``snake_case_`` is never bound here (the
    list is assigned to ``UpperCAmelCase_``) and ``odd_even_transposition``
    is not defined under that name in this file; restore before running.
    """
    UpperCAmelCase_ = list(range(10 , 0 , -1 ) )
    print("Initial List" )
    print(*snake_case_ )
    UpperCAmelCase_ = odd_even_transposition(snake_case_ )
    print("Sorted List\n" )
    print(*snake_case_ )
if __name__ == "__main__":
main()
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index_a: int, index_b: int, direction: int) -> None:
    """Compare ``array[index_a]`` and ``array[index_b]`` and swap them in
    place if they are out of order for ``direction`` (1 = ascending,
    0 = descending).

    NOTE: restored from the mangled original, whose signature repeated one
    parameter name (a SyntaxError) and whose body compared an element with
    itself; the name ``comp_and_swap`` is what the in-file callers use.
    """
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a], array[index_b] = array[index_b], array[index_a]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge, in place, the bitonic sequence
    ``array[low : low + length]`` into sorted order for ``direction``
    (1 = ascending, 0 = descending).  ``length`` must be a power of two.

    NOTE: restored from the mangled original, whose signature repeated one
    parameter name (a SyntaxError); the recursive calls and the module
    guard already use the name ``bitonic_merge``.
    """
    if length > 1:
        middle = int(length / 2)
        # Pairwise compare the two halves, then merge each half.
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Bitonic-sort ``array[low : low + length]`` in place for ``direction``
    (1 = ascending, 0 = descending).  ``length`` must be a power of two.

    Sorts the first half ascending and the second half descending to form a
    bitonic sequence, then merges it.

    NOTE: restored from the mangled original, whose signature repeated one
    parameter name (a SyntaxError); the recursive calls and the module
    guard already use the name ``bitonic_sort``.
    """
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Dict =input('Enter numbers separated by a comma:\n').strip()
SCREAMING_SNAKE_CASE_: Union[str, Any] =[int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 1 | '''simple docstring'''
def lowerCAmelCase_ ( a: int , b: int ) -> str:
    """Return the bitwise OR of two non-negative integers as a binary
    string prefixed with ``0b``.

    Args:
        a, b: non-negative integers.

    Returns:
        e.g. ``lowerCAmelCase_(25, 32)`` -> ``'0b111001'``.

    Raises:
        ValueError: if either input is negative.
    """
    # Restored parameter names: the mangled signature repeated one name
    # (a SyntaxError) while the body already used ``a`` and ``b``.
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    # A result bit is 1 when either zero-padded operand has a 1 there.
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __A ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
    """GPT-2-based text decoder with a learned prefix projection.

    Projects a fixed-length prefix embedding (optionally through a hidden
    bottleneck) into the GPT-2 embedding space, prepends it to the token
    embeddings, and decodes text with beam search.

    NOTE(review): identifiers in this file were mechanically mangled —
    methods are named ``_lowercase``, ``__init__`` repeats the ``__a``
    parameter name (a SyntaxError), and assignments target
    ``UpperCAmelCase_`` while bodies read the real names.  Documented
    as-is; restore the original names before running.
    """

    # Checkpoint keys to ignore on load (GPT-2 attention bias buffers).
    a__ : int = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""]

    # Constructor: builds the (optional) prefix encoder/decoder linear
    # layers and the underlying GPT-2 LM head model.
    @register_to_config
    def __init__(self : List[Any] , __a : int , __a : int , __a : Optional[int] = None , __a : int = 50257 , __a : int = 1024 , __a : int = 768 , __a : int = 12 , __a : int = 12 , __a : Optional[int] = None , __a : str = "gelu_new" , __a : float = 0.1 , __a : float = 0.1 , __a : float = 0.1 , __a : float = 1E-5 , __a : float = 0.02 , __a : bool = True , __a : bool = True , __a : bool = False , __a : bool = False , ):
        super().__init__()

        UpperCAmelCase_ = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
                f""" `n_embd`: {n_embd} are not equal.""" )

        UpperCAmelCase_ = prefix_inner_dim
        UpperCAmelCase_ = prefix_hidden_dim

        # Identity when no hidden bottleneck is configured.
        UpperCAmelCase_ = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        UpperCAmelCase_ = (
            nn.Linear(self.prefix_hidden_dim , __a ) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        UpperCAmelCase_ = GPTaConfig(
            vocab_size=__a , n_positions=__a , n_embd=__a , n_layer=__a , n_head=__a , n_inner=__a , activation_function=__a , resid_pdrop=__a , embd_pdrop=__a , attn_pdrop=__a , layer_norm_epsilon=__a , initializer_range=__a , scale_attn_weights=__a , use_cache=__a , scale_attn_by_inverse_layer_idx=__a , reorder_and_upcast_attn=__a , )
        UpperCAmelCase_ = GPTaLMHeadModel(__a )

    # forward: concatenate the projected prefix embeddings with the token
    # embeddings and run GPT-2 (optionally with labels for training).
    def _lowercase (self : Union[str, Any] , __a : torch.Tensor , __a : torch.Tensor , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , ):
        UpperCAmelCase_ = self.transformer.transformer.wte(__a )
        UpperCAmelCase_ = self.encode_prefix(__a )
        UpperCAmelCase_ = self.decode_prefix(__a )
        UpperCAmelCase_ = torch.cat((prefix_embeds, embedding_text) , dim=1 )

        if labels is not None:
            # Dummy tokens pad the label sequence to cover the prefix slots.
            UpperCAmelCase_ = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            UpperCAmelCase_ = torch.cat((dummy_token, input_ids) , dim=1 )
        UpperCAmelCase_ = self.transformer(inputs_embeds=__a , labels=__a , attention_mask=__a )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    # get_dummy_token: zeros of shape (batch, prefix_length).
    def _lowercase (self : List[Any] , __a : int , __a : torch.device ):
        return torch.zeros(__a , self.prefix_length , dtype=torch.intaa , device=__a )

    # encode: project features into the prefix hidden space.
    def _lowercase (self : Union[str, Any] , __a : Tuple ):
        return self.encode_prefix(__a )

    # generate_captions: decode each feature in the batch independently with
    # beam search, returning stacked tokens and sequence lengths.
    @torch.no_grad()
    def _lowercase (self : str , __a : List[str] , __a : List[Any] , __a : List[Any] ):
        UpperCAmelCase_ = torch.split(__a , 1 , dim=0 )
        UpperCAmelCase_ = []
        UpperCAmelCase_ = []
        for feature in features:
            UpperCAmelCase_ = self.decode_prefix(feature.to(__a ) )  # back to the clip feature
            # Only support beam search for now
            UpperCAmelCase_ , UpperCAmelCase_ = self.generate_beam(
                input_embeds=__a , device=__a , eos_token_id=__a )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        UpperCAmelCase_ = torch.stack(__a )
        UpperCAmelCase_ = torch.stack(__a )
        return generated_tokens, generated_seq_lengths

    # generate_beam: beam search over GPT-2 logits, tracking per-beam scores,
    # lengths and stop flags; returns beams ordered by average log-prob.
    @torch.no_grad()
    def _lowercase (self : List[str] , __a : Dict=None , __a : Optional[int]=None , __a : List[Any]=None , __a : int = 5 , __a : int = 67 , __a : float = 1.0 , __a : Optional[int] = None , ):
        UpperCAmelCase_ = eos_token_id
        UpperCAmelCase_ = None
        UpperCAmelCase_ = None
        UpperCAmelCase_ = torch.ones(__a , device=__a , dtype=torch.int )
        UpperCAmelCase_ = torch.zeros(__a , device=__a , dtype=torch.bool )

        if input_embeds is not None:
            UpperCAmelCase_ = input_embeds
        else:
            UpperCAmelCase_ = self.transformer.transformer.wte(__a )

        for i in range(__a ):
            UpperCAmelCase_ = self.transformer(inputs_embeds=__a )
            UpperCAmelCase_ = outputs.logits
            UpperCAmelCase_ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            UpperCAmelCase_ = logits.softmax(-1 ).log()

            if scores is None:
                # First step: seed the beams from the top-k next tokens.
                UpperCAmelCase_ , UpperCAmelCase_ = logits.topk(__a , -1 )
                UpperCAmelCase_ = generated.expand(__a , *generated.shape[1:] )
                UpperCAmelCase_ , UpperCAmelCase_ = next_tokens.permute(1 , 0 ), scores.squeeze(0 )

                if tokens is None:
                    UpperCAmelCase_ = next_tokens
                else:
                    UpperCAmelCase_ = tokens.expand(__a , *tokens.shape[1:] )
                    UpperCAmelCase_ = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Subsequent steps: rank candidate extensions by
                # length-normalised cumulative score.
                UpperCAmelCase_ = -float(np.inf )
                UpperCAmelCase_ = 0
                UpperCAmelCase_ = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                UpperCAmelCase_ = scores_sum / seq_lengths[:, None]
                UpperCAmelCase_ , UpperCAmelCase_ = scores_sum_average.view(-1 ).topk(__a , -1 )

                UpperCAmelCase_ = next_tokens // scores_sum.shape[1]
                UpperCAmelCase_ = seq_lengths[next_tokens_source]
                UpperCAmelCase_ = next_tokens % scores_sum.shape[1]
                UpperCAmelCase_ = next_tokens.unsqueeze(1 )

                UpperCAmelCase_ = tokens[next_tokens_source]
                UpperCAmelCase_ = torch.cat((tokens, next_tokens) , dim=1 )
                UpperCAmelCase_ = generated[next_tokens_source]
                UpperCAmelCase_ = scores_sum_average * seq_lengths
                UpperCAmelCase_ = is_stopped[next_tokens_source]

            UpperCAmelCase_ = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            UpperCAmelCase_ = torch.cat((generated, next_token_embed) , dim=1 )
            UpperCAmelCase_ = is_stopped + next_tokens.eq(__a ).squeeze()
            if is_stopped.all():
                break

        UpperCAmelCase_ = scores / seq_lengths
        UpperCAmelCase_ = scores.argsort(descending=__a )
        # tokens tensors are already padded to max_seq_length
        UpperCAmelCase_ = [tokens[i] for i in order]
        UpperCAmelCase_ = torch.stack(__a , dim=0 )
        UpperCAmelCase_ = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 1 | '''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place with the (deliberately
    inefficient) slowsort algorithm.

    Args:
        sequence: list to sort in place.
        start: first index of the slice (defaults to 0).
        end: last index of the slice (defaults to ``len(sequence) - 1``).

    NOTE: restored from the mangled original, whose signature repeated one
    parameter name (a SyntaxError); the recursive calls already use the
    name ``slowsort``.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    # Sort both halves, bubble the maximum to ``end``, then recurse on the
    # rest of the slice.
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 1 |
'''simple docstring'''
import math
import sys
def lowerCAmelCase_ ( number: int ) -> int:
    """Return the minimum number of perfect squares that sum to ``number``
    (dynamic programming over ``answers[i]`` for ``i`` up to ``number``).

    Args:
        number: a natural number (this implementation returns 1 for 0).

    Raises:
        ValueError: if ``number`` is not a natural number or is negative.
    """
    # Restored names: the mangled body read ``number``/``answer``/``root``
    # while assignments targeted ``UpperCAmelCase_``.
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            # Best count using j**2 as the last square.
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | '''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __A ( UpperCamelCase__ ):
    """DistilBERT tokenization tests; inherits the shared BERT suite."""

    a__ : Optional[Any] = DistilBertTokenizer
    a__ : Any = DistilBertTokenizerFast
    a__ : str = True

    @slow
    def _lowercase (self : int ):
        """Check [CLS]/[SEP] placement from build_inputs_with_special_tokens."""
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
        # Encode WITHOUT special tokens so we can verify they are added
        # explicitly below.  (The mangled original passed the undefined
        # name ``__a`` for ``add_special_tokens``.)
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 1 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class __A ( UpperCamelCase__ ):
    """Agent tool that summarizes English text with a BART-CNN-SamSum model.

    NOTE: method names restored from the PipelineTool contract — the
    mangled file defined all three methods as ``_lowercase``, so later defs
    shadowed earlier ones; it also passed the input text itself where the
    boolean flags ``truncation``/``skip_special_tokens``/
    ``clean_up_tokenization_spaces`` belong.
    """

    a__ : Optional[int] = """philschmid/bart-large-cnn-samsum"""
    a__ : str = (
        """This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
        """and returns a summary of the text."""
    )
    a__ : Optional[int] = """summarizer"""
    a__ : str = AutoTokenizer
    a__ : Optional[int] = AutoModelForSeqaSeqLM

    a__ : Optional[int] = ["""text"""]
    a__ : Optional[int] = ["""text"""]

    def encode(self, text):
        # Truncate overly long inputs to the model's maximum length.
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 1 | '''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE(review): the list below is bound to ``SCREAMING_SNAKE_CASE_`` but every
# ``append``/``extend`` targets the name ``rename_keys``, which is never defined
# in this file — as written this raises NameError at import time.
SCREAMING_SNAKE_CASE_: Tuple =[]
# Build (original key -> HF key) pairs for each of the 6 encoder/decoder layers.
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ('input_proj.weight', 'input_projection.weight'),
        ('input_proj.bias', 'input_projection.bias'),
        ('query_embed.weight', 'query_position_embeddings.weight'),
        ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
        ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
        ('class_embed.weight', 'class_labels_classifier.weight'),
        ('class_embed.bias', 'class_labels_classifier.bias'),
        ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
        ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
        ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
        ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
        ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
        ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
        ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
        ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
        ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
        ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
        ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
        ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
        ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
        ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
        ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
        ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
    ]
)
def lowerCAmelCase_ ( snake_case_ : dict , old_key : str , new_key : str ) -> None:
    """Rename ``old_key`` to ``new_key`` in the state dict ``snake_case_``, in place.

    Fixes the original definition, which repeated the parameter name
    ``snake_case_`` three times (a ``SyntaxError`` in Python) and discarded the
    popped value into a throwaway local instead of re-inserting it.  Callers
    pass ``(state_dict, src, dest)`` positionally, which this signature keeps.
    """
    # Pop under the old name and re-insert the same tensor under the new one.
    val = snake_case_.pop(old_key )
    snake_case_[new_key] = val
def lowerCAmelCase_ ( snake_case_ ) -> "OrderedDict":
    """Return a copy of the state dict with timm backbone keys renamed.

    Keys containing ``backbone.0.body`` are rewritten to
    ``backbone.conv_encoder.model``; everything else is copied unchanged.

    Fixes the original body, which iterated the undefined free variable
    ``state_dict`` instead of its parameter, dropped every computed entry into
    a throwaway local, and returned the undefined name ``new_state_dict``.
    """
    new_state_dict = OrderedDict()
    for key , value in snake_case_.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Dict=False ) -> List[str]:
    """Split each encoder layer's fused in-projection into q/k/v entries.

    NOTE(review): the parameter name ``snake_case_`` is repeated — a
    ``SyntaxError`` in Python as written.  The body also reads the free names
    ``is_panoptic``, ``state_dict``, ``prefix``, ``in_proj_weight`` and
    ``in_proj_bias`` (none defined here) and binds every result to a throwaway
    local, so the split values are never written back.  Kept byte-identical
    pending a proper repair.
    """
    UpperCAmelCase_ = ""
    if is_panoptic:
        UpperCAmelCase_ = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        UpperCAmelCase_ = in_proj_weight[:2_56, :]
        UpperCAmelCase_ = in_proj_bias[:2_56]
        UpperCAmelCase_ = in_proj_weight[2_56:5_12, :]
        UpperCAmelCase_ = in_proj_bias[2_56:5_12]
        UpperCAmelCase_ = in_proj_weight[-2_56:, :]
        UpperCAmelCase_ = in_proj_bias[-2_56:]
def lowerCAmelCase_ ( ):
    """Download and return the standard COCO validation test image (two cats).

    Fixes the original body, which bound the URL and the opened image to a
    throwaway local, passed the undefined name ``snake_case_`` to
    ``requests.get`` and returned the undefined name ``im``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read directly from the response's raw file object.
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Dict ) -> Optional[int]:
    """Convert a ConditionalDETR torch-hub checkpoint into the HF format.

    NOTE(review): the parameter name ``snake_case_`` is repeated — a
    ``SyntaxError`` in Python as written.  Nearly every intermediate result
    below is bound to the throwaway local ``UpperCAmelCase_`` while later
    statements read never-defined names (``model_name``, ``is_panoptic``,
    ``idalabel``, ``state_dict``, ``val``, ``prefix``, ``outputs``, ...).
    Kept byte-identical pending a proper repair.
    """
    UpperCAmelCase_ = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        UpperCAmelCase_ = "resnet101"
    if "dc5" in model_name:
        UpperCAmelCase_ = True
    UpperCAmelCase_ = "panoptic" in model_name
    if is_panoptic:
        UpperCAmelCase_ = 2_50
    else:
        UpperCAmelCase_ = 91
    UpperCAmelCase_ = "huggingface/label-files"
    UpperCAmelCase_ = "coco-detection-id2label.json"
    UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
    UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()}
    UpperCAmelCase_ = idalabel
    UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
    # load image processor
    UpperCAmelCase_ = "coco_panoptic" if is_panoptic else "coco_detection"
    UpperCAmelCase_ = ConditionalDetrImageProcessor(format=snake_case_ )
    # prepare image
    UpperCAmelCase_ = prepare_img()
    UpperCAmelCase_ = image_processor(images=snake_case_ , return_tensors="pt" )
    UpperCAmelCase_ = encoding["pixel_values"]
    logger.info(f"""Converting model {model_name}...""" )
    # load original model from torch hub
    UpperCAmelCase_ = torch.hub.load("DeppMeng/ConditionalDETR" , snake_case_ , pretrained=snake_case_ ).eval()
    UpperCAmelCase_ = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            UpperCAmelCase_ = "conditional_detr." + src
        rename_key(snake_case_ , snake_case_ , snake_case_ )
    UpperCAmelCase_ = rename_backbone_keys(snake_case_ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(snake_case_ , is_panoptic=snake_case_ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    UpperCAmelCase_ = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
    # finally, create HuggingFace model and load state dict
    UpperCAmelCase_ = ConditionalDetrForSegmentation(snake_case_ ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case_ )
    model.load_state_dict(snake_case_ )
    model.eval()
    model.push_to_hub(repo_id=snake_case_ , organization="DepuMeng" , commit_message="Add model" )
    # verify our conversion
    UpperCAmelCase_ = conditional_detr(snake_case_ )
    UpperCAmelCase_ = model(snake_case_ )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
    # Save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
    model.save_pretrained(snake_case_ )
    image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
    # CLI entry point: parse the model name and dump folder, then convert.
    # NOTE(review): the parser is bound to ``SCREAMING_SNAKE_CASE_`` while the
    # calls below read ``parser``/``args``, and the conversion function is
    # defined above as ``lowerCAmelCase_`` — running this script raises
    # NameError as written.
    SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='conditional_detr_resnet50',
        type=str,
        help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    SCREAMING_SNAKE_CASE_: int =parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 1 | 1 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_: Optional[int] =logging.getLogger()
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( UpperCamelCase__ ):
    # End-to-end tests for RAG fine-tuning: write a tiny dummy dataset, run
    # finetune_rag.py in a subprocess, and check the resulting metrics file.
    # NOTE(review): all four test methods and both helpers share the name
    # ``_lowercase`` (later defs shadow earlier ones), the second helper
    # repeats the parameter name ``__a`` (a SyntaxError), and the bodies read
    # free names (``contents``, ``n_lines``, ``data_dir``, ``testargs``,
    # ``result``, ...) whose assignments were lost to ``UpperCAmelCase_``.
    def _lowercase (self : Optional[Any] , __a : str ):
        # Create {train,val,test}.{source,target} files under ``__a``.
        os.makedirs(__a , exist_ok=__a )
        UpperCAmelCase_ = {"source": "What is love ?", "target": "life"}
        UpperCAmelCase_ = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                UpperCAmelCase_ = "\n".join([contents[field]] * n_lines[split] )
                with open(os.path.join(__a , f"""{split}.{field}""" ) , "w" ) as f:
                    f.write(__a )
    def _lowercase (self : Optional[int] , __a : int , __a : str = "pytorch" ):
        # Run the fine-tuning script in a subprocess and return its metrics.
        UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
        UpperCAmelCase_ = os.path.join(__a , "output" )
        UpperCAmelCase_ = os.path.join(__a , "data" )
        self._create_dummy_data(data_dir=__a )
        UpperCAmelCase_ = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        """.split()
        if gpus > 0:
            testargs.append(f"""--gpus={gpus}""" )
            if is_apex_available():
                testargs.append("--fp16" )
        else:
            testargs.append("--gpus=0" )
            testargs.append("--distributed_backend=ddp_cpu" )
            testargs.append("--num_processes=2" )
        UpperCAmelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(__a , env=self.get_env() )
        UpperCAmelCase_ = os.path.join(__a , "metrics.json" )
        with open(__a ) as f:
            UpperCAmelCase_ = json.load(__a )
        return result
    @require_torch_gpu
    def _lowercase (self : Optional[int] ):
        # Single-GPU run must reach at least 0.2 average exact-match.
        UpperCAmelCase_ = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
    @require_torch_multi_gpu
    def _lowercase (self : Dict ):
        # Multi-GPU run with the default (pytorch) distributed retriever.
        UpperCAmelCase_ = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
    @require_torch_gpu
    @require_ray
    def _lowercase (self : Optional[int] ):
        # Single-GPU run with the Ray distributed retriever.
        UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
    @require_torch_multi_gpu
    @require_ray
    def _lowercase (self : Any ):
        # Multi-GPU variant of the Ray retriever test.
        UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
| 1 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
    # Deprecated alias kept for backwards compatibility: warns once and then
    # defers entirely to the CLIPImageProcessor implementation.
    def __init__(self : int , *__a : Dict , **__a : str ):
        # NOTE(review): ``*__a`` and ``**__a`` repeat the same parameter name,
        # which is a SyntaxError in Python as written.
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead." , __a , )
        super().__init__(*__a , **__a )
| 1 | 1 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __A ( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
    # Formatter that converts Arrow rows/columns/batches into PyTorch tensors.
    # NOTE(review): several bodies below read free names (``torch_tensor_kwargs``,
    # ``value``, ``default_dtype``, ``column``) whose bindings were lost to the
    # throwaway local ``UpperCAmelCase_`` — kept byte-identical pending repair.
    def __init__(self : Union[str, Any] , __a : Tuple=None , **__a : List[Any] ):
        # NOTE(review): positional and keyword-variadic parameters share the
        # name ``__a`` — a SyntaxError in Python as written.
        super().__init__(features=__a )
        UpperCAmelCase_ = torch_tensor_kwargs
        import torch # noqa import torch at initialization
    def _lowercase (self : Optional[Any] , __a : int ):
        # Stack a column into one tensor when all items share shape and dtype.
        import torch
        if isinstance(__a , __a ) and column:
            if all(
                isinstance(__a , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(__a )
        return column
    def _lowercase (self : Optional[Any] , __a : Tuple ):
        # Convert a single leaf value to a torch tensor, choosing a default
        # dtype (int64 for integers, float32 for floats) and decoding PIL images.
        import torch
        if isinstance(__a , (str, bytes, type(__a )) ):
            return value
        elif isinstance(__a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        UpperCAmelCase_ = {}
        if isinstance(__a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            UpperCAmelCase_ = {"dtype": torch.intaa}
        elif isinstance(__a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            UpperCAmelCase_ = {"dtype": torch.floataa}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(__a , PIL.Image.Image ):
                UpperCAmelCase_ = np.asarray(__a )
        return torch.tensor(__a , **{**default_dtype, **self.torch_tensor_kwargs} )
    def _lowercase (self : int , __a : str ):
        # Recursively tensorize nested structures (arrays, lists, tuples).
        import torch
        # support for torch, tf, jax etc.
        if hasattr(__a , "__array__" ) and not isinstance(__a , torch.Tensor ):
            UpperCAmelCase_ = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(__a , np.ndarray ):
            if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(__a ) for substruct in data_struct] )
        elif isinstance(__a , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(__a ) for substruct in data_struct] )
        return self._tensorize(__a )
    def _lowercase (self : Any , __a : dict ):
        # Map the recursive tensorizer over every field of the mapping.
        return map_nested(self._recursive_tensorize , __a , map_list=__a )
    def _lowercase (self : int , __a : pa.Table ):
        # Extract, decode and tensorize a single row of the Arrow table.
        UpperCAmelCase_ = self.numpy_arrow_extractor().extract_row(__a )
        UpperCAmelCase_ = self.python_features_decoder.decode_row(__a )
        return self.recursive_tensorize(__a )
    def _lowercase (self : Union[str, Any] , __a : pa.Table ):
        # Extract, decode, tensorize and consolidate one column.
        UpperCAmelCase_ = self.numpy_arrow_extractor().extract_column(__a )
        UpperCAmelCase_ = self.python_features_decoder.decode_column(__a , pa_table.column_names[0] )
        UpperCAmelCase_ = self.recursive_tensorize(__a )
        UpperCAmelCase_ = self._consolidate(__a )
        return column
    def _lowercase (self : Tuple , __a : pa.Table ):
        # Extract, decode and tensorize a whole batch, consolidating per column.
        UpperCAmelCase_ = self.numpy_arrow_extractor().extract_batch(__a )
        UpperCAmelCase_ = self.python_features_decoder.decode_batch(__a )
        UpperCAmelCase_ = self.recursive_tensorize(__a )
        for column_name in batch:
            UpperCAmelCase_ = self._consolidate(batch[column_name] )
        return batch
| 1 | '''simple docstring'''
from __future__ import annotations
import queue
class __A :
def __init__(self : Optional[Any] , __a : str ):
UpperCAmelCase_ = data
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def lowerCAmelCase_ ( ) -> TreeNode:
    """Interactively build a binary tree level by level from stdin input.

    NOTE(review): every node/queue binding below goes to the throwaway local
    ``UpperCAmelCase_`` while later statements read the never-defined names
    ``q``, ``snake_case_``, ``node_found``, ``check``, ``tree_node``,
    ``left_node`` and ``right_node``; ``TreeNode`` itself is defined above as
    ``__A``.  Running this raises NameError as written — kept byte-identical.
    """
    print("\n********Press N to stop entering at any point of time********\n" )
    UpperCAmelCase_ = input("Enter the value of the root node: " ).strip().lower()
    UpperCAmelCase_ = queue.Queue()
    UpperCAmelCase_ = TreeNode(int(snake_case_ ) )
    q.put(snake_case_ )
    while not q.empty():
        UpperCAmelCase_ = q.get()
        UpperCAmelCase_ = f"""Enter the left node of {node_found.data}: """
        UpperCAmelCase_ = input(snake_case_ ).strip().lower() or "n"
        if check == "n":
            return tree_node
        UpperCAmelCase_ = TreeNode(int(snake_case_ ) )
        UpperCAmelCase_ = left_node
        q.put(snake_case_ )
        UpperCAmelCase_ = f"""Enter the right node of {node_found.data}: """
        UpperCAmelCase_ = input(snake_case_ ).strip().lower() or "n"
        if check == "n":
            return tree_node
        UpperCAmelCase_ = TreeNode(int(snake_case_ ) )
        UpperCAmelCase_ = right_node
        q.put(snake_case_ )
    raise
def lowerCAmelCase_ ( snake_case_ : "TreeNode | None" ) -> None:
    """Print a pre-order traversal (root, left, right), comma-separated.

    Fixes the original body, which called ``isinstance(x, x)`` (a TypeError)
    and recursed via the undefined names ``node``/``pre_order``.  Recursion is
    done through an inner helper so it is immune to the module-level name
    collisions (every traversal here is named ``lowerCAmelCase_``).
    """
    def _walk(node ) -> None:
        # Empty subtree: nothing to print.
        if node is None:
            return
        print(node.data , end="," )
        _walk(node.left )
        _walk(node.right )
    _walk(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : "TreeNode | None" ) -> None:
    """Print an in-order traversal (left, root, right), comma-separated.

    Fixes the original body, which called ``isinstance(x, x)`` (a TypeError)
    and recursed via the undefined names ``node``/``in_order``.  An inner
    helper carries the recursion so module-level name shadowing cannot break it.
    """
    def _walk(node ) -> None:
        if node is None:
            return
        _walk(node.left )
        print(node.data , end="," )
        _walk(node.right )
    _walk(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : "TreeNode | None" ) -> None:
    """Print a post-order traversal (left, right, root), comma-separated.

    Fixes the original body, which called ``isinstance(x, x)`` (a TypeError)
    and recursed via the undefined names ``node``/``post_order``.  An inner
    helper carries the recursion so module-level name shadowing cannot break it.
    """
    def _walk(node ) -> None:
        if node is None:
            return
        _walk(node.left )
        _walk(node.right )
        print(node.data , end="," )
    _walk(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : "TreeNode | None" ) -> None:
    """Print a breadth-first (level-order) traversal, comma-separated.

    Fixes the original body, which called ``isinstance(x, x)`` (a TypeError),
    enqueued the undefined name ``node`` and read the never-assigned
    ``node_dequeued`` (all real bindings went to a throwaway local).
    """
    if snake_case_ is None:
        return
    q = queue.Queue()
    q.put(snake_case_ )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end="," )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def lowerCAmelCase_ ( snake_case_ : "TreeNode | None" ) -> None:
    """Print a level-order traversal with one line per tree level.

    Each level's values are comma-separated and followed by a newline.
    Fixes the original body, which called ``isinstance(x, x)`` (a TypeError)
    and read the undefined names ``node``, ``list_`` and ``node_dequeued``.
    """
    if snake_case_ is None:
        return
    q = queue.Queue()
    q.put(snake_case_ )
    while not q.empty():
        next_level = []
        # Drain the current level, collecting the next level's nodes.
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end="," )
            if node_dequeued.left:
                next_level.append(node_dequeued.left )
            if node_dequeued.right:
                next_level.append(node_dequeued.right )
        print()
        for node in next_level:
            q.put(node )
def lowerCAmelCase_ ( snake_case_ : "TreeNode | None" ) -> None:
    """Print a pre-order traversal iteratively using an explicit stack.

    Fixes the original body, which called ``isinstance(x, x)`` (a TypeError)
    and read undefined names (``node``, ``stack``, ``n``) because all real
    bindings went to a throwaway local.
    """
    if snake_case_ is None:
        return
    stack = []
    n = snake_case_
    while n or stack:
        while n:  # descend leftwards, printing each node on the way down
            print(n.data , end="," )
            stack.append(n )
            n = n.left
        # no left child left: backtrack and switch to the right subtree
        n = stack.pop()
        n = n.right
def lowerCAmelCase_ ( snake_case_ : "TreeNode | None" ) -> None:
    """Print an in-order traversal iteratively using an explicit stack.

    Fixes the original body, which called ``isinstance(x, x)`` (a TypeError)
    and read undefined names (``node``, ``stack``, ``n``) because all real
    bindings went to a throwaway local.
    """
    if snake_case_ is None:
        return
    stack = []
    n = snake_case_
    while n or stack:
        while n:  # push the whole left spine first
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end="," )
        n = n.right
def lowerCAmelCase_ ( snake_case_ : "TreeNode | None" ) -> None:
    """Print a post-order traversal iteratively using two stacks.

    The first stack produces the reversed (root, right, left) order; popping
    the second stack then yields (left, right, root).  Fixes the original
    body, which called ``isinstance(x, x)`` (a TypeError), collapsed both
    stacks onto one name and read undefined names throughout.
    """
    if snake_case_ is None:
        return
    stack_a , stack_b = [], []
    stack_a.append(snake_case_ )
    while stack_a:  # build the reversed post-order in stack_b
        n = stack_a.pop()
        if n.left:
            stack_a.append(n.left )
        if n.right:
            stack_a.append(n.right )
        stack_b.append(n )
    while stack_b:  # popping stack_b emits the post-order
        print(stack_b.pop().data , end="," )
def lowerCAmelCase_ ( s : str = "" , width : int = 50 , char : str = "*" ) -> str:
    """Return ``s`` centered in a banner of ``char`` with total length ``width``.

    An empty ``s`` yields a newline followed by a full-width rule.  Fixes the
    original definition, which repeated the parameter name ``snake_case_``
    three times (a ``SyntaxError``); the parameter names ``s``/``width``/
    ``char`` are restored from the references already present in the body.
    """
    if not s:
        return "\n" + width * char
    # Split the remaining width (minus the two padding spaces) around ``s``;
    # any odd leftover character goes on the right side.
    left , extra = divmod(width - len(s ) - 2 , 2 )
    return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
    # Demo driver: build a tree interactively, then print every traversal.
    # NOTE(review): all helpers in this module are defined under the single
    # name ``lowerCAmelCase_``, so ``prompt``, ``build_tree``, ``pre_order``
    # etc. (and the free name ``node``) are undefined here — running this
    # script raises NameError as written.
    import doctest
    doctest.testmod()
    print(prompt('Binary Tree Traversals'))
    SCREAMING_SNAKE_CASE_: TreeNode =build_tree()
    print(prompt('Pre Order Traversal'))
    pre_order(node)
    print(prompt() + '\n')
    print(prompt('In Order Traversal'))
    in_order(node)
    print(prompt() + '\n')
    print(prompt('Post Order Traversal'))
    post_order(node)
    print(prompt() + '\n')
    print(prompt('Level Order Traversal'))
    level_order(node)
    print(prompt() + '\n')
    print(prompt('Actual Level Order Traversal'))
    level_order_actual(node)
    print('*' * 50 + '\n')
    print(prompt('Pre Order Traversal - Iteration Version'))
    pre_order_iter(node)
    print(prompt() + '\n')
    print(prompt('In Order Traversal - Iteration Version'))
    in_order_iter(node)
    print(prompt() + '\n')
    print(prompt('Post Order Traversal - Iteration Version'))
    post_order_iter(node)
    print(prompt())
| 1 | 1 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
SCREAMING_SNAKE_CASE_: List[str] =logging.get_logger(__name__)
class __A :
    # Abstract base for hyper-parameter search backends (name, availability
    # check, run entry point, default search space, install hint).
    # NOTE(review): every backend class in this file is named ``__A`` and every
    # method ``_lowercase`` (later definitions shadow earlier ones); the
    # three-argument ``_lowercase`` below also repeats the parameter name
    # ``__a``, which is a SyntaxError in Python as written.
    a__ : str
    a__ : str = None
    @staticmethod
    def _lowercase ():
        raise NotImplementedError
    def _lowercase (self : Any , __a : int , __a : int , __a : str , **__a : Dict ):
        raise NotImplementedError
    def _lowercase (self : Union[str, Any] , __a : Optional[int] ):
        raise NotImplementedError
    def _lowercase (self : List[Any] ):
        # Raise a helpful error when the chosen backend's package is missing.
        if not self.is_available():
            raise RuntimeError(
                f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
    @classmethod
    def _lowercase (cls : Optional[int] ):
        # Human-readable install hint, e.g. "`pip install optuna`".
        return f"""`pip install {cls.pip_package or cls.name}`"""
class __A ( UpperCamelCase__ ):
    # Optuna hyper-parameter search backend.
    a__ : int = """optuna"""
    @staticmethod
    def _lowercase ():
        return is_optuna_available()
    def _lowercase (self : int , __a : int , __a : int , __a : str , **__a : Tuple ):
        # NOTE(review): the parameter name ``__a`` is repeated — a SyntaxError
        # in Python as written.
        return run_hp_search_optuna(__a , __a , __a , **__a )
    def _lowercase (self : Any , __a : Dict ):
        return default_hp_space_optuna(__a )
class __A ( UpperCamelCase__ ):
    # Ray Tune hyper-parameter search backend (installed as "ray[tune]").
    a__ : Dict = """ray"""
    a__ : List[Any] = """'ray[tune]'"""
    @staticmethod
    def _lowercase ():
        return is_ray_available()
    def _lowercase (self : Dict , __a : int , __a : int , __a : str , **__a : Optional[Any] ):
        # NOTE(review): the parameter name ``__a`` is repeated — a SyntaxError
        # in Python as written.
        return run_hp_search_ray(__a , __a , __a , **__a )
    def _lowercase (self : Optional[Any] , __a : Tuple ):
        return default_hp_space_ray(__a )
class __A ( UpperCamelCase__ ):
    # SigOpt hyper-parameter search backend.
    a__ : Tuple = """sigopt"""
    @staticmethod
    def _lowercase ():
        return is_sigopt_available()
    def _lowercase (self : str , __a : Any , __a : int , __a : str , **__a : Any ):
        # NOTE(review): the parameter name ``__a`` is repeated — a SyntaxError
        # in Python as written.
        return run_hp_search_sigopt(__a , __a , __a , **__a )
    def _lowercase (self : List[Any] , __a : Optional[int] ):
        return default_hp_space_sigopt(__a )
class __A ( UpperCamelCase__ ):
    # Weights & Biases hyper-parameter search backend.
    a__ : List[str] = """wandb"""
    @staticmethod
    def _lowercase ():
        return is_wandb_available()
    def _lowercase (self : str , __a : List[Any] , __a : int , __a : str , **__a : int ):
        # NOTE(review): the parameter name ``__a`` is repeated — a SyntaxError
        # in Python as written.
        return run_hp_search_wandb(__a , __a , __a , **__a )
    def _lowercase (self : Dict , __a : Tuple ):
        return default_hp_space_wandb(__a )
# Registry mapping each HPSearchBackend enum member to its backend class.
# NOTE(review): all four backend classes above are defined under the name
# ``__A``, so ``OptunaBackend``/``RayTuneBackend``/``SigOptBackend``/
# ``WandbBackend`` are undefined here — NameError at import time as written.
SCREAMING_SNAKE_CASE_: int ={
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCAmelCase_ ( ) -> str:
    """Pick the first installed hyper-parameter search backend's name.

    Raises RuntimeError with install hints when none is available.

    NOTE(review): the available-backends list is bound to a throwaway local
    while the body then reads the undefined names ``snake_case_``,
    ``available_backends`` and ``name`` — NameError as written.
    """
    UpperCAmelCase_ = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(snake_case_ ) > 0:
        UpperCAmelCase_ = available_backends[0].name
        if len(snake_case_ ) > 1:
            logger.info(
                f"""{len(snake_case_ )} hyperparameter search backends available. Using {name} as the default.""" )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f""" - To install {backend.name} run {backend.pip_install()}"""
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 1 | '''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase__ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class __A ( UpperCamelCase__ ):
    def _lowercase (self : str , __a : GenericTensor ):
        # Locate the positions of the tokenizer's mask token in the input ids.
        # NOTE(review): the body reads the free name ``input_ids`` (the
        # parameter is ``__a``), binds the result to a throwaway local and
        # returns the undefined name ``masked_index`` — NameError as written.
        if self.framework == "tf":
            UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a )
        else:
            raise ValueError("Unsupported framework" )
        return masked_index
    def _lowercase (self : Tuple , __a : GenericTensor ):
        # Raise a PipelineException when the input contains no mask token.
        # NOTE(review): intermediate results go to a throwaway local, so
        # ``masked_index`` and ``numel`` are undefined free names here.
        UpperCAmelCase_ = self.get_masked_index(__a )
        UpperCAmelCase_ = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
    def _lowercase (self : List[Any] , __a : GenericTensor ):
        # Validate that every row of the model inputs has a mask token.
        # NOTE(review): ``isinstance(__a , __a )`` raises TypeError unless
        # ``__a`` is a class, and both loops iterate the free name
        # ``model_inputs`` instead of the parameter ``__a``.
        if isinstance(__a , __a ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(__a )
    def _lowercase (self : Tuple , __a : Dict , __a : List[str]=None , **__a : Any ):
        # Tokenize the input and validate that it contains a mask token.
        # NOTE(review): the parameter name ``__a`` is repeated (a SyntaxError
        # in Python), and the body reads the free names ``return_tensors`` and
        # ``model_inputs`` whose bindings went to a throwaway local.
        if return_tensors is None:
            UpperCAmelCase_ = self.framework
        UpperCAmelCase_ = self.tokenizer(__a , return_tensors=__a )
        self.ensure_exactly_one_mask_token(__a )
        return model_inputs
    def _lowercase (self : str , __a : Optional[int] ):
        # Forward pass over the tokenized inputs.
        # NOTE(review): both results are bound to a throwaway local and the
        # body reads the undefined names ``model_inputs``/``model_outputs``.
        UpperCAmelCase_ = self.model(**__a )
        UpperCAmelCase_ = model_inputs["input_ids"]
        return model_outputs
def _lowercase (self : List[str] , __a : Tuple , __a : int=5 , __a : Dict=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCAmelCase_ = target_ids.shape[0]
UpperCAmelCase_ = model_outputs["input_ids"][0]
UpperCAmelCase_ = model_outputs["logits"]
if self.framework == "tf":
UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
UpperCAmelCase_ = outputs.numpy()
UpperCAmelCase_ = outputs[0, masked_index, :]
UpperCAmelCase_ = stable_softmax(__a , axis=-1 )
if target_ids is not None:
UpperCAmelCase_ = tf.gather_nd(tf.squeeze(__a , 0 ) , target_ids.reshape(-1 , 1 ) )
UpperCAmelCase_ = tf.expand_dims(__a , 0 )
UpperCAmelCase_ = tf.math.top_k(__a , k=__a )
UpperCAmelCase_ , UpperCAmelCase_ = topk.values.numpy(), topk.indices.numpy()
else:
UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCAmelCase_ = outputs[0, masked_index, :]
UpperCAmelCase_ = logits.softmax(dim=-1 )
if target_ids is not None:
UpperCAmelCase_ = probs[..., target_ids]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(__a )
UpperCAmelCase_ = []
UpperCAmelCase_ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
UpperCAmelCase_ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
UpperCAmelCase_ = input_ids.numpy().copy()
if target_ids is not None:
UpperCAmelCase_ = target_ids[p].tolist()
UpperCAmelCase_ = p
# Filter padding out:
UpperCAmelCase_ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCAmelCase_ = self.tokenizer.decode(__a , skip_special_tokens=__a )
UpperCAmelCase_ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(__a )
result.append(__a )
if single_mask:
return result[0]
return result
def _lowercase (self : Dict , __a : List[Any] , __a : List[str]=None ):
if isinstance(__a , __a ):
UpperCAmelCase_ = [targets]
try:
UpperCAmelCase_ = self.tokenizer.get_vocab()
except Exception:
UpperCAmelCase_ = {}
UpperCAmelCase_ = []
for target in targets:
UpperCAmelCase_ = vocab.get(__a , __a )
if id_ is None:
UpperCAmelCase_ = self.tokenizer(
__a , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , max_length=1 , truncation=__a , )["input_ids"]
if len(__a ) == 0:
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
"We cannot replace it with anything meaningful, ignoring it" )
continue
UpperCAmelCase_ = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
UpperCAmelCase_ = list(set(__a ) )
if len(__a ) == 0:
raise ValueError("At least one target must be provided when passed." )
UpperCAmelCase_ = np.array(__a )
return target_ids
def _lowercase (self : Tuple , __a : Dict=None , __a : List[str]=None ):
UpperCAmelCase_ = {}
if targets is not None:
UpperCAmelCase_ = self.get_target_ids(__a , __a )
UpperCAmelCase_ = target_ids
if top_k is not None:
UpperCAmelCase_ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__(self : Union[str, Any] , __a : str , *__a : Any , **__a : Tuple ):
UpperCAmelCase_ = super().__call__(__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
| 1 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: List[str] =logging.get_logger(__name__)
def get_deta_config(model_name):
    """Build a `DetaConfig` (Swin-Large backbone) for the given checkpoint name.

    Args:
        model_name: Either a COCO checkpoint name or one containing "o365"
            (Objects365 pretraining), which selects the label set.

    Returns:
        A fully populated `DetaConfig` with id2label/label2id mappings.

    Note: the original block had duplicate/mangled parameter and local names
    (a SyntaxError); identifiers are restored from the body's own references
    (`model_name`, `backbone_config`). The callers at the bottom of the file
    already call `get_deta_config`.
    """
    # Swin-Large backbone feeding three feature stages into the detector.
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )
    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels: Objects365 checkpoints use 366 classes, COCO uses 91.
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    # JSON keys are strings; the config expects integer class ids.
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    """Return the list of (old_key, new_key) pairs mapping original DETA
    checkpoint parameter names to HuggingFace model parameter names.

    Args:
        config: a DetaConfig-like object providing `backbone_config.depths`,
            `encoder_layers` and `decoder_layers`.

    Returns:
        list[tuple[str, str]]: rename pairs for stem, Swin stages, stage norms,
        transformer encoder and transformer decoder weights.

    Note: the original block named its parameter `snake_case_` while the body
    referenced `config`, and built `rename_keys` without ever defining it —
    both restored here. The caller already calls `create_rename_keys(config)`.
    """
    rename_keys = []

    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight"))
    rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        # the last stage has no patch-merging downsample layer
        if i < 3:
            rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias"))

    rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight"))
    rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias"))
    rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight"))
    rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias"))
    rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight"))
    rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias"))
    # transformer encoder
    for i in range(config.encoder_layers):
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias"))
    # transformer decoder
    for i in range(config.decoder_layers):
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias"))
    # fmt: on

    return rename_keys
def rename_key(dct, old, new):
    """Move the entry `dct[old]` to `dct[new]` in place.

    Args:
        dct: state dict being rewritten.
        old: key to remove.
        new: key to insert with the removed value.

    Note: the original block declared three parameters all named `snake_case_`
    (a SyntaxError) while its body referenced `dct` and `val`; the conversion
    loop below already calls `rename_key(state_dict, src, dest)`.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split each Swin block's fused `attn.qkv` weight/bias into separate
    query/key/value entries under the HuggingFace naming scheme, in place.

    Args:
        state_dict: checkpoint state dict (modified in place).
        backbone_config: Swin config providing `embed_dim` and `depths`.

    Note: parameter and local names are restored from the body's own references
    (`backbone_config`, `in_proj_weight`, `in_proj_bias`); the fused tensor is
    laid out as [query; key; value] along dim 0.
    """
    # Per-stage hidden size doubles at every stage.
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    """Split each decoder self-attention fused `in_proj` weight/bias into
    separate q/k/v projection entries under the HuggingFace names, in place.

    Args:
        state_dict: checkpoint state dict (modified in place).
        config: DetaConfig providing `d_model` and `decoder_layers`.

    Note: names are restored from the body's own references (`config.d_model`,
    `hidden_size`, `in_proj_weight`); the fused tensor is [q; k; v] along dim 0.
    """
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    """Download and return the standard COCO val2017 test image (two cats on a
    couch) used to sanity-check the converted model's outputs.

    Returns:
        PIL.Image.Image: the decoded image.

    Note: the original block assigned both locals to `UpperCAmelCase_` yet
    referenced `snake_case_`/`im` — names restored from the body and the
    `return im` statement. The conversion routine already calls `prepare_img()`.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read straight from the HTTP response body.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an original DETA checkpoint into HuggingFace format.

    Downloads the original checkpoint, renames/splits its weights to the
    HuggingFace layout, verifies logits/boxes on a reference image, and
    optionally saves to disk and/or pushes to the hub.

    Args:
        model_name: "deta-swin-large" or "deta-swin-large-o365".
        pytorch_dump_folder_path: output folder, or None to skip saving.
        push_to_hub: if True, push model and processor to the hub.

    Raises:
        ValueError: for an unsupported `model_name`.

    Note: the original block's parameter and local names were mangled; they are
    restored from the body's references and the argparse call at the bottom of
    the file, which already invokes `convert_deta_checkpoint`.
    """
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"""Model name {model_name} not supported""")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict (printed for inspection)
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    # NOTE(review): the replacement strings follow the upstream conversion
    # script; confirm against the released transformers version.
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"""jozhang97/{model_name}""")
        processor.push_to_hub(f"""jozhang97/{model_name}""")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the checkpoint conversion.
    # (The original assigned the parser and parsed args to
    # `SCREAMING_SNAKE_CASE_` while using `parser`/`args` — names restored.)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        type=str,
        default='deta-swin-large',
        choices=['deta-swin-large', 'deta-swin-large-o365'],
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        help='Path to the folder to output PyTorch model.',
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 1 | '''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single train/dev example for HANS-style sequence-pair classification.

    Field names are restored from the keyword call site below
    (`InputExample(guid=..., text_a=..., text_b=..., label=..., pairID=...)`);
    the mangled original repeated the field name `a__` and used an undefined
    `frozen=UpperCamelCase__`.
    """

    guid: str  # unique id, e.g. "train-42"
    text_a: str  # premise
    text_b: Optional[str] = None  # hypothesis (optional for single-sequence tasks)
    label: Optional[str] = None  # gold label string
    pairID: Optional[str] = None  # HANS pair identifier
@dataclass(frozen=True)
class InputFeatures:
    """Tokenized features for one example, ready to feed the model.

    Constructed below via `InputFeatures(**tokenizer_output, label=..., pairID=...)`,
    so the field names match the tokenizer's output keys; the mangled original
    repeated the field name `a__` and used an undefined `frozen=UpperCamelCase__`.
    """

    input_ids: List[int]  # token ids, padded to max_length
    attention_mask: Optional[List[int]] = None  # 1 for real tokens, 0 for padding
    token_type_ids: Optional[List[int]] = None  # segment ids (sentence A/B)
    label: Optional[Union[int, float]] = None  # integer class id (float for regression)
    pairID: Optional[int] = None  # numeric HANS pair id
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __A ( UpperCamelCase__ ):
    # PyTorch Dataset over HANS examples with on-disk feature caching.
    # NOTE(review): this block was machine-obfuscated — `__init__` repeats the
    # parameter name `__a` (a SyntaxError), locals are all assigned to
    # `UpperCAmelCase_`, and methods are named `_lowercase`. The body still
    # references the original names (`task`, `tokenizer`, `evaluate`,
    # `data_dir`, `label_list`, `self.features`), so the intended signature is
    # (data_dir, tokenizer, task, max_seq_length=None, overwrite_cache=False,
    # evaluate=False) — restore before use.

    # Cached tokenized features for the whole split.
    a__ : List[InputFeatures]

    def __init__(self : Any , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : bool = False , ):
        # Look up the processor for this task and build the cache-file name from
        # split, tokenizer class, max length and task.
        UpperCAmelCase_ = hans_processors[task]()
        UpperCAmelCase_ = os.path.join(
            __a , "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , )
        UpperCAmelCase_ = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
        UpperCAmelCase_ = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        UpperCAmelCase_ = cached_features_file + ".lock"
        with FileLock(__a ):
            if os.path.exists(__a ) and not overwrite_cache:
                # Cache hit: load pre-tokenized features.
                logger.info(f"""Loading features from cached file {cached_features_file}""" )
                UpperCAmelCase_ = torch.load(__a )
            else:
                # Cache miss: read raw examples, tokenize, and persist the cache.
                logger.info(f"""Creating features from dataset file at {data_dir}""" )
                UpperCAmelCase_ = (
                    processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
                )
                logger.info("Training examples: %s" , len(__a ) )
                UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
                logger.info("Saving features into cached file %s" , __a )
                torch.save(self.features , __a )

    def __len__(self : List[Any] ):
        # Number of examples in the split.
        return len(self.features )

    def __getitem__(self : Any , __a : Optional[Any] ):
        # Return the i-th feature (NOTE(review): body uses `i`, parameter is `__a` — mangled).
        return self.features[i]

    def _lowercase (self : Union[str, Any] ):
        # get_labels: the (possibly RoBERTa-swapped) label list.
        return self.label_list
if is_tf_available():
import tensorflow as tf
class __A :
    # TensorFlow counterpart of the HANS dataset: tokenizes the split eagerly
    # and exposes it as a tf.data.Dataset built from a generator.
    # NOTE(review): machine-obfuscated like its PyTorch sibling — duplicated
    # `__a` parameters (a SyntaxError), locals assigned to `UpperCAmelCase_`,
    # methods named `_lowercase`. Body references imply the signature
    # (data_dir, tokenizer, task, max_seq_length=128, overwrite_cache=False,
    # evaluate=False) and attributes `self.features` / `self.dataset`.

    # Tokenized features for the whole split.
    a__ : List[InputFeatures]

    def __init__(self : Union[str, Any] , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = 128 , __a : Any=False , __a : bool = False , ):
        UpperCAmelCase_ = hans_processors[task]()
        UpperCAmelCase_ = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
        UpperCAmelCase_ = label_list
        UpperCAmelCase_ = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
        UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )

        def gen():
            # Yield (inputs-dict, label) pairs for tf.data.Dataset.from_generator.
            for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
                if ex_index % 10000 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(__a )) )
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        # NOTE(review): `tf.intaa` is the mangled form of a tf integer dtype
        # (presumably tf.int32/int64 — confirm against the original source).
        UpperCAmelCase_ = tf.data.Dataset.from_generator(
            __a , (
                {
                    "example_id": tf.intaa,
                    "input_ids": tf.intaa,
                    "attention_mask": tf.intaa,
                    "token_type_ids": tf.intaa,
                },
                tf.intaa,
            ) , (
                {
                    "example_id": tf.TensorShape([] ),
                    "input_ids": tf.TensorShape([None, None] ),
                    "attention_mask": tf.TensorShape([None, None] ),
                    "token_type_ids": tf.TensorShape([None, None] ),
                },
                tf.TensorShape([] ),
            ) , )

    def _lowercase (self : int ):
        # get_dataset: the tf.data.Dataset built above.
        return self.dataset

    def __len__(self : Any ):
        return len(self.features )

    def __getitem__(self : int , __a : Union[str, Any] ):
        # Return the i-th feature (NOTE(review): body uses `i`, parameter is `__a` — mangled).
        return self.features[i]

    def _lowercase (self : int ):
        # get_labels: the (possibly RoBERTa-swapped) label list.
        return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set.

    Class name restored from the `hans_processors` registry at the bottom of
    the file; base class restored from the `DataProcessor` import at the top.
    The mangled original had duplicated method names and undefined locals.
    """

    def get_train_examples(self, data_dir):
        """Read heuristics_train_set.txt from `data_dir` as training examples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """Read heuristics_evaluation_set.txt from `data_dir` as dev examples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """Return the three MNLI-style labels used by HANS models."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Build InputExample objects from TSV rows, skipping the header.

        Column layout (from the original HANS files): 0=gold label,
        5=premise, 6=hypothesis, 7=pair id (possibly "ex"-prefixed).
        """
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                # header row
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            # Strip the "ex" prefix so pairID can later be converted to int.
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """Tokenize a list of `InputExample`s into `InputFeatures`.

    Args:
        examples: list of InputExample (premise/hypothesis pairs).
        label_list: ordered label strings; index becomes the integer label.
        max_length: pad/truncate length passed to the tokenizer.
        tokenizer: the tokenizer used to encode each pair.

    Returns:
        list[InputFeatures], one per example.

    Note: the original block declared four parameters all named `snake_case_`
    (a SyntaxError); names are restored from the body and from the dataset
    classes above, which already call `hans_convert_examples_to_features`.
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        # Unknown labels fall back to class 0 (best-effort, matches original).
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    # Log the first few converted examples for sanity checking.
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"""guid: {example}""")
        logger.info(f"""features: {features[i]}""")

    return features
# Number of classification labels per task.
hans_tasks_num_labels = {
    'hans': 3,
}

# Registry mapping task name -> processor class; the dataset classes above
# look processors up here via `hans_processors[task]()`. (The mangled original
# bound both dicts to `SCREAMING_SNAKE_CASE_`, so `hans_processors` was
# undefined at those call sites.)
hans_processors = {
    'hans': HansProcessor,
}
| 1 | 1 |
'''UMT5 model configuration.'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
# Module-level logger. NOTE(review): both module constants below are bound to
# the same mangled name `SCREAMING_SNAKE_CASE_`, so the second assignment
# shadows the logger — presumably these were `logger` and
# `UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm against the original source.
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config URL.
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
    'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    """Configuration for UMT5 models.

    Parameter names are restored from the mangled original's body (every
    `self.x = x` assignment referenced the real name while the signature
    repeated `__a`, a SyntaxError). Defaults are byte-identical to the
    original's positional defaults.
    """

    model_type = """umt5"""
    keys_to_ignore_at_inference = ["""past_key_values"""]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1E-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # Parse "gated-<act>" or "<act>" into activation name + gating flag.
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        if feed_forward_proj == "gated-gelu":
            # gated-gelu historically maps to the gelu_new activation.
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        # Alias expected by generic transformers utilities.
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for UMT5 (seq2seq with optional past key values).

    Base class restored from the module's `OnnxSeqaSeqConfigWithPast` import;
    the mangled original subclassed an undefined `UpperCamelCase__` and
    assigned dict entries to a single mangled local.
    """

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self):
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # With cached past, the attention mask covers past + current tokens
            # and the decoder consumes one token at a time.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self):
        return 13

    @property
    def atol_for_validation(self):
        # Tolerance used when validating the ONNX export against PyTorch.
        return 5E-4
| 1 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ={}
class LlamaConfig(PretrainedConfig):
    """Configuration for LLaMA models.

    Parameter names restored from the mangled original's body (the signature
    repeated `__a`, a SyntaxError, while every `self.x = x` assignment named
    the real attribute). Defaults are byte-identical to the original's
    positional defaults.
    """

    model_type = """llama"""
    keys_to_ignore_at_inference = ["""past_key_values"""]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1E-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict: {"type": "linear"|"dynamic", "factor": float > 1}.

        Raises:
            ValueError: on any malformed `rope_scaling` value.
        """
        if self.rope_scaling is None:
            return

        # (Fixed duplicated word "with with" in the original error message.)
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""")
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def lowerCAmelCase_ ( resistance : float , reactance : float , impedance : float ) -> dict[str, float]:
    """Solve the electrical-impedance triangle for the missing quantity.

    Exactly one of the three arguments must be 0 (the unknown); the other two
    are used to compute it via ``impedance**2 = resistance**2 + reactance**2``.
    Fixed: the original declared all three parameters as ``snake_case_``,
    which is a SyntaxError (duplicate argument names).

    Returns a one-entry dict mapping the solved quantity's name to its value.
    Raises ValueError unless exactly one argument is 0.
    """
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 1 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __A ( unittest.TestCase ):
    """Fast unit tests for StableDiffusionUpscalePipeline built from tiny dummy components.

    NOTE(review): this block is machine-scrambled. ``__a`` appears where the
    original referenced a real name (most uses look like ``torch_device`` —
    confirm), ``np.uinta``/``torch.floataa`` presumably stand for
    ``np.uint8``/``torch.float16``, and the repeated ``UpperCAmelCase_``
    assignments shadow one another while later lines read the original local
    names (``batch_size``, ``sd_pipe``, ``low_res_image``, ...). Left
    byte-identical pending comparison with the upstream diffusers test.
    """

    def _lowercase (self : Tuple ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _lowercase (self : str ):
        # Dummy 1x3x32x32 input image tensor with a fixed RNG seed.
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = 3
        UpperCAmelCase_ = (32, 32)
        UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
        return image

    @property
    def _lowercase (self : int ):
        # Tiny conditional UNet configured for the upscale pipeline (7 in-channels).
        torch.manual_seed(0 )
        UpperCAmelCase_ = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model

    @property
    def _lowercase (self : Any ):
        # Tiny VAE matching the UNet's channel layout.
        torch.manual_seed(0 )
        UpperCAmelCase_ = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model

    @property
    def _lowercase (self : Optional[Any] ):
        # Tiny CLIP text encoder.
        torch.manual_seed(0 )
        UpperCAmelCase_ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        return CLIPTextModel(__a )

    def _lowercase (self : Any ):
        # End-to-end single-prompt run; compares a 3x3 output slice to golden values.
        UpperCAmelCase_ = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ = self.dummy_cond_unet_upscale
        UpperCAmelCase_ = DDPMScheduler()
        UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
        UpperCAmelCase_ = self.dummy_vae
        UpperCAmelCase_ = self.dummy_text_encoder
        UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        UpperCAmelCase_ = StableDiffusionUpscalePipeline(
            unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
        UpperCAmelCase_ = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        UpperCAmelCase_ = "A painting of a squirrel eating a burger"
        UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
        UpperCAmelCase_ = sd_pipe(
            [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        UpperCAmelCase_ = output.images
        UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
        UpperCAmelCase_ = sd_pipe(
            [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0]
        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
        # Upscale pipeline quadruples the spatial resolution of the input image.
        UpperCAmelCase_ = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        UpperCAmelCase_ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def _lowercase (self : Optional[int] ):
        # Checks batching: 2 prompts / 2 images, and num_images_per_prompt=2.
        UpperCAmelCase_ = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ = self.dummy_cond_unet_upscale
        UpperCAmelCase_ = DDPMScheduler()
        UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
        UpperCAmelCase_ = self.dummy_vae
        UpperCAmelCase_ = self.dummy_text_encoder
        UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        UpperCAmelCase_ = StableDiffusionUpscalePipeline(
            unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
        UpperCAmelCase_ = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        UpperCAmelCase_ = "A painting of a squirrel eating a burger"
        UpperCAmelCase_ = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        UpperCAmelCase_ = output.images
        assert image.shape[0] == 2
        UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
        UpperCAmelCase_ = sd_pipe(
            [prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        UpperCAmelCase_ = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _lowercase (self : str ):
        # fp16 smoke test; the VAE stays in fp32 because it overflows in fp16.
        UpperCAmelCase_ = self.dummy_cond_unet_upscale
        UpperCAmelCase_ = DDPMScheduler()
        UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
        UpperCAmelCase_ = self.dummy_vae
        UpperCAmelCase_ = self.dummy_text_encoder
        UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        UpperCAmelCase_ = unet.half()
        UpperCAmelCase_ = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        UpperCAmelCase_ = StableDiffusionUpscalePipeline(
            unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
        UpperCAmelCase_ = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        UpperCAmelCase_ = "A painting of a squirrel eating a burger"
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = sd_pipe(
            [prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images
        UpperCAmelCase_ = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
    """Slow GPU integration tests against the real stabilityai/stable-diffusion-x4-upscaler.

    NOTE(review): scrambled like the class above — ``__a`` in ``pipe.to(__a )``
    and ``disable=__a`` presumably stands for ``torch_device``/``True``, and
    ``torch.floataa`` for ``torch.float16``. Left byte-identical; confirm
    against the upstream diffusers test.
    """

    def _lowercase (self : List[str] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowercase (self : List[Any] ):
        # fp32 run on a real low-res cat image; compares against a stored golden array.
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy" )
        UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()
        UpperCAmelCase_ = "a cat sitting on a park bench"
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(
            prompt=__a , image=__a , generator=__a , output_type="np" , )
        UpperCAmelCase_ = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-3

    def _lowercase (self : Tuple ):
        # Same scenario in fp16, with a looser tolerance against the fp16 golden array.
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy" )
        UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
            __a , torch_dtype=torch.floataa , )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()
        UpperCAmelCase_ = "a cat sitting on a park bench"
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(
            prompt=__a , image=__a , generator=__a , output_type="np" , )
        UpperCAmelCase_ = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    def _lowercase (self : List[Any] ):
        # Memory-budget test: sequential CPU offload + attention slicing must stay < 2.9 GB.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
            __a , torch_dtype=torch.floataa , )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCAmelCase_ = "a cat sitting on a park bench"
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(
            prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , )
        UpperCAmelCase_ = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 1 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
class __A ( OwlViTImageProcessor ):
    """Deprecated alias for :class:`OwlViTImageProcessor`, kept for backward compatibility.

    Fixed from a scrambled version: ``__init__`` declared both ``*__a`` and
    ``**__a`` (duplicate argument name — a SyntaxError), the warning had no
    category (the scrambled ``__a`` stood for ``FutureWarning``), and the base
    class was the undefined ``UpperCamelCase__``.
    """

    def __init__(self : Any , *args : Optional[Any] , **kwargs : List[str] ):
        # Warn once about the deprecation, then behave exactly like the new class.
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 1 | '''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __A ( TransformedDistribution ):
    """Distribution of ``loc + scale * x`` for ``x`` drawn from ``base_distribution``.

    Fixed from a scrambled version: the ``__init__`` parameters were all named
    ``__a`` (a SyntaxError), the base class was undefined (now
    ``TransformedDistribution``, imported above), and the three statistics
    properties were all named ``_lowercase`` — shadowing each other and
    breaking the ``self.variance`` reference inside ``stddev``.
    """

    def __init__(self , base_distribution: Distribution , loc=None , scale=None , event_dim=0 ):
        # Default to the identity transform when loc/scale are omitted.
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )

    @property
    def mean(self ):
        # E[loc + scale*X] = loc + scale * E[X]
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self ):
        # Var[loc + scale*X] = scale^2 * Var[X]
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self ):
        return self.variance.sqrt()
class __A ( nn.Module ):
    """Projects hidden features to one tensor per distribution argument.

    One ``nn.Linear`` head is created per entry of ``args_dim``; the raw head
    outputs are then mapped into each argument's valid domain by ``domain_map``.
    Fixed from a scrambled version: the ``__init__`` parameters were all named
    ``__a`` (a SyntaxError) and the forward method was named ``_lowercase``,
    which breaks ``nn.Module.__call__`` dispatch.
    """

    def __init__(self , in_features: int , args_dim: Dict[str, int] , domain_map: Callable[..., Tuple[torch.Tensor]] , **kwargs ):
        super().__init__(**kwargs )
        self.args_dim = args_dim
        # One linear head per distribution argument, sized by args_dim.
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map

    def forward(self , x: torch.Tensor ):
        unbounded_params = [proj(x ) for proj in self.proj]
        return self.domain_map(*unbounded_params )
class __A ( nn.Module ):
    """Wraps a plain callable so it can participate in an ``nn.Module`` graph.

    Fixed from a scrambled version: the forward method declared ``__a`` and
    ``*__a`` (duplicate argument name — a SyntaxError) and was named
    ``_lowercase``, which breaks ``nn.Module.__call__`` dispatch.
    """

    def __init__(self , function ):
        super().__init__()
        self.function = function

    def forward(self , x , *args ):
        return self.function(x , *args )
class __A :
    """Base class mapping projected network outputs to a ``torch`` Distribution.

    Subclasses set ``distribution_class`` and ``args_dim`` and implement
    ``domain_map``. Fixed from a scrambled version in which every method was
    named ``_lowercase`` — breaking the class's own references to
    ``self._base_distribution``, ``self.event_shape``, ``self.event_dim`` and
    ``self.domain_map`` — and ``distribution`` declared three parameters all
    named ``__a`` (a SyntaxError).
    """

    # Filled in by concrete subclasses.
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self , dim: int = 1 ):
        self.dim = dim
        # Each scalar head is repeated ``dim`` times for multivariate outputs.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self , distr_args ):
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )

    def distribution(self , distr_args , loc: Optional[torch.Tensor] = None , scale: Optional[torch.Tensor] = None , ):
        """Build the (optionally affine-transformed) output distribution."""
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )

    @property
    def event_shape(self ):
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self ):
        return len(self.event_shape )

    @property
    def value_in_support(self ):
        # Default padding value that lies inside the distribution's support.
        return 0.0

    def get_parameter_projection(self , in_features: int ):
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )

    def domain_map(self , *args: torch.Tensor ):
        # Subclasses must map raw head outputs into each argument's valid domain.
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor ):
        # Smooth positivity map: (x + sqrt(x^2 + 4)) / 2.
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class __A ( UpperCamelCase__ ):
    """Student-T distribution output head.

    Fixed from a scrambled version: the class attributes were both named
    ``a__`` (the base class reads ``self.args_dim``/``self.distribution_class``)
    and the classmethod declared three parameters all named ``__a`` (a
    SyntaxError) and was named ``_lowercase`` although the base class calls it
    via ``self.domain_map``.
    """

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls , df: torch.Tensor , loc: torch.Tensor , scale: torch.Tensor ):
        # scale must be strictly positive; df must exceed 2.
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
    """Normal (Gaussian) distribution output head.

    Fixed from a scrambled version: attributes were both named ``a__`` and the
    classmethod declared two parameters both named ``__a`` (a SyntaxError) and
    was named ``_lowercase`` although the base class calls ``self.domain_map``.
    """

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls , loc: torch.Tensor , scale: torch.Tensor ):
        # scale must be strictly positive.
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
    """Negative-binomial distribution output head (integer-valued targets).

    Fixed from a scrambled version: attributes were both named ``a__``, the
    classmethod declared two parameters both named ``__a`` (a SyntaxError),
    ``distribution`` declared duplicate parameters as well, and all three
    methods were named ``_lowercase`` although the base class dispatches on
    ``domain_map`` / ``_base_distribution`` / ``distribution``.
    """

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls , total_count: torch.Tensor , logits: torch.Tensor ):
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )

    def _base_distribution(self , distr_args ):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )

    def distribution(self , distr_args , loc: Optional[torch.Tensor] = None , scale: Optional[torch.Tensor] = None ):
        # Overrides the base: a negative binomial must stay integer-valued, so
        # instead of an affine transform we fold ``scale`` into the logits.
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 1 | 1 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
# Fixed: all four module constants were assigned to the same scrambled name
# (each shadowing the previous), while the tokenizer class below references
# ``logger`` / ``VOCAB_FILES_NAMES`` / ``PRETRAINED_VOCAB_FILES_MAP`` /
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`` — which therefore never existed.
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}
# Maximum input length (in tokens) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 1_63_84,
}
class __A ( PreTrainedTokenizerFast ):
    """"Fast" LED tokenizer, a byte-level BPE tokenizer backed by *tokenizers*.

    Fixed from a scrambled version: the class attributes were all named ``a__``
    (so the tokenizer framework attributes never existed), every ``__init__``
    parameter was named ``__a`` (a SyntaxError), the base class was the
    undefined ``UpperCamelCase__`` (now ``PreTrainedTokenizerFast``, imported
    above), and all methods were named ``_lowercase`` — which broke the
    ``@mask_token.setter`` reference and ``super()`` dispatch.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self ,
        vocab_file=None ,
        merges_file=None ,
        tokenizer_file=None ,
        errors="replace" ,
        bos_token="<s>" ,
        eos_token="</s>" ,
        sep_token="</s>" ,
        cls_token="<s>" ,
        unk_token="<unk>" ,
        pad_token="<pad>" ,
        mask_token="<mask>" ,
        add_prefix_space=False ,
        trim_offsets=True ,
        **kwargs ,
    ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        # Rebuild the pre-tokenizer if the requested prefix-space behaviour differs.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )

    @property
    def mask_token(self ):
        """Mask token as a string; logs an error and returns None if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token(self , value ):
        # The mask token behaves like a normal word, including the space before it.
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value

    def _batch_encode_plus(self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus(self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        # <s> A </s> (pair: <s> A </s></s> B </s>)
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        # LED (like BART/RoBERTa) does not use token type ids: all zeros.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def _pad(self , encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding] , max_length: Optional[int] = None , padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 1 | '''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
SCREAMING_SNAKE_CASE_: Optional[Any] ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
SCREAMING_SNAKE_CASE_: Union[str, Any] ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
SCREAMING_SNAKE_CASE_: List[Any] =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    """Accuracy metric for the MATH dataset (canonicalizes LaTeX before comparing).

    Fixed from a scrambled version: the compute method declared two parameters
    both named ``__a`` (a SyntaxError) and both methods were named
    ``_lowercase``, although ``datasets.Metric`` dispatches on ``_info`` and
    ``_compute``.
    """

    def _info(self ):
        # Both predictions and references are free-form strings (natural language + LaTeX).
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" ),
                    "references": datasets.Value("string" ),
                } ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )

    def _compute(self , predictions , references ):
        # Fraction of predictions that are mathematically equivalent to the reference.
        n_correct = 0.0
        for pred, ref in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(pred , ref ) else 0.0
        accuracy = n_correct / len(references )
        return {
            "accuracy": accuracy,
        }
| 1 | 1 |
'''simple docstring'''
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
# Fixed: the path was assigned to a scrambled name but read back as
# ``git_repo_path`` on the next line (a NameError). Point sys.path at the
# repo's ``src`` so the test imports transformers master, not an installed copy.
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
# Fixed: the four constants were all assigned to one scrambled name (shadowing
# each other) and the stage list referenced the undefined ``ZEROa``; the test
# class below reads ``models`` and ``stages``.
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]
def lowerCAmelCase_ ( func , param_num , param ) -> str:
    """Build a readable test name for ``parameterized.expand`` cases.

    Fixed from a scrambled version: all three parameters were named
    ``snake_case_`` (a SyntaxError) and the join stringified the loop
    variable's shadowed name instead of each element ``x`` of ``param.args``.
    """
    param_based_name = parameterized.to_safe_name("_".join(str(x ) for x in param.args ) )
    return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
# Fixed: the result was assigned to a scrambled shadowed name; the
# ``parameterized.expand`` decorators below expect ``params``.
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __A ( UpperCamelCase__ ):
    """DeepSpeed integration tests for the wav2vec2 ASR example (ZeRO-2/3 x fp16/fp32).

    NOTE(review): this block is machine-scrambled and currently broken — the
    base class ``UpperCamelCase__`` is undefined (presumably ``TestCasePlus``),
    the ``@parameterized.expand(__a , name_func=__a )`` decorators reference the
    undefined ``__a`` (presumably ``params`` and the custom name function), and
    several methods declare multiple parameters all named ``__a``, which is a
    SyntaxError. Left byte-identical pending comparison with the upstream test.
    """

    @parameterized.expand(__a , name_func=__a )
    def _lowercase (self : Union[str, Any] , __a : Dict , __a : Any ):
        self.run_and_check(
            stage=__a , model=__a , distributed=__a , fpaa=__a , )

    @require_torch_multi_gpu
    @parameterized.expand(__a , name_func=__a )
    def _lowercase (self : Tuple , __a : Dict , __a : List[Any] ):
        self.run_and_check(
            stage=__a , model=__a , distributed=__a , fpaa=__a , )

    @parameterized.expand(__a , name_func=__a )
    def _lowercase (self : Tuple , __a : Any , __a : str ):
        self.run_and_check(
            stage=__a , model=__a , distributed=__a , fpaa=__a , )

    @require_torch_multi_gpu
    @parameterized.expand(__a , name_func=__a )
    def _lowercase (self : int , __a : Union[str, Any] , __a : Any ):
        self.run_and_check(
            stage=__a , model=__a , distributed=__a , fpaa=__a , )

    def _lowercase (self : str , __a : Optional[int] ):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def _lowercase (self : List[str] , __a : str , __a : str , __a : int = 10 , __a : bool = True , __a : bool = True , __a : bool = True , ):
        # run_and_check: launch the trainer for a (stage, model) combo, then do_checks.
        UpperCAmelCase_ = models[model]
        UpperCAmelCase_ = self.run_trainer(
            stage=__a , model_name=__a , eval_steps=__a , num_train_epochs=1 , distributed=__a , fpaa=__a , )
        self.do_checks(__a )
        return output_dir

    def _lowercase (self : Any , __a : str , __a : str , __a : int = 10 , __a : int = 1 , __a : bool = True , __a : bool = True , ):
        # run_trainer: build the CLI for run_asr.py and execute it under deepspeed.
        UpperCAmelCase_ = self.get_auto_remove_tmp_dir("./xxx" , after=__a )
        UpperCAmelCase_ = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(__a )}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()
        if fpaa:
            args.extend(["--fp16"] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        UpperCAmelCase_ = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
        UpperCAmelCase_ = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
        UpperCAmelCase_ = self.get_launcher(__a )
        UpperCAmelCase_ = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(__a , env=self.get_env() )
        return output_dir

    def _lowercase (self : Any , __a : List[str]=False ):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        UpperCAmelCase_ = min(2 , get_gpu_count() ) if distributed else 1
        return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 1 | '''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase_ ( model_a , model_b , did_step , iteration ) -> None:
    """Assert that the gradients of two models are (or are not) in sync, pairwise.

    When ``did_step`` is falsy the gradients must differ (accumulation without a
    sync); when truthy they must match. Fixed from a scrambled version in which
    all four parameters were named ``snake_case_`` (a SyntaxError).
    """
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def lowerCAmelCase_ ( model , input , target , accelerator , do_backward=True ) -> None:
    """Run one forward/backward step of an MSE regression model.

    NOTE(review): parameter order follows accelerate's ``test_sync`` helper
    (model, input, target, accelerator) — the scrambled original named all of
    them ``snake_case_`` (a SyntaxError); ``input`` intentionally mirrors the
    upstream name even though it shadows the builtin. When ``do_backward`` is
    False the loss is scaled by the accumulation steps and back-propagated
    directly; otherwise ``accelerator.backward`` is used.
    """
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def lowerCAmelCase_ ( accelerator , sched=False ):
    """Build a regression model, its DDP-prepared copy and a dataloader.

    With ``sched`` True also builds AdamW optimizers and LambdaLR schedulers
    for both copies and prepares the DDP side with the accelerator, returning
    ``(model, opt, scheduler, dataloader, ddp_model, ddp_opt, ddp_sched)``;
    otherwise returns ``(model, ddp_model, dataloader)``. Fixed from a
    scrambled version in which both parameters were named ``snake_case_``
    (a SyntaxError).
    """
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dataset = RegressionDataset(length=80 )
    dataloader = DataLoader(dataset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1E-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        scheduler = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, scheduler, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def lowerCAmelCase_ ( accelerator ) -> None:
    '''On a single device `Accelerator.no_sync` must be a no-op: gradients of
    the prepared model stay in sync with the reference model on every
    iteration, whether or not the context manager was used.

    Fix: restored the local names the obfuscation left unbound
    (`model`, `ddp_model`, `dataloader`, `input`, `target`, `ddp_input`, ...).
    '''
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]


test_noop_sync = lowerCAmelCase_  # readable alias for `main`
def lowerCAmelCase_ ( accelerator ) -> None:
    '''In a distributed setup, gradients of the DDP-wrapped model must match
    the reference model ONLY on iterations where `no_sync` was not used
    (even iterations accumulate locally, odd iterations sync).

    Fix: restored the local names the obfuscation left unbound.
    '''
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]


test_distributed_sync = lowerCAmelCase_  # readable alias for `main`
def lowerCAmelCase_ ( split_batches=False , dispatch_batches=False ) -> None:
    '''Check that `Accelerator.accumulate` syncs gradients only every
    `gradient_accumulation_steps` (here 2) batches, or on the final batch.

    Fix: restored the local names the obfuscation left unbound.
    '''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP" (manual accumulation path)
        step_model(model , input , target , accelerator , False )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
    GradientState._reset_state()


test_gradient_accumulation = lowerCAmelCase_  # readable alias for `main`
def lowerCAmelCase_ ( split_batches=False , dispatch_batches=False ) -> None:
    '''Same accumulation check as above, but with an optimizer and LR
    scheduler in the loop: learning rates of the manual path and the
    Accelerate-managed path must stay aligned on every batch.

    Fix: restored the local names the obfuscation left unbound
    (`accelerator`, `model`, `opt`, `sched`, `ddp_*`, `did_step`, ...).
    '''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , opt , sched , dataloader , ddp_model , ddp_opt , ddp_sched = get_training_setup(accelerator , True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model , input , target , accelerator , False )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader )):
            if split_batches:
                sched.step()
            else:
                # Without split_batches each process sees its own batch, so the
                # scheduler advances once per process to mirror Accelerate.
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader ))
        if accelerator.num_processes > 1:
            check_model_parameters(model , ddp_model , did_step , iteration )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
    GradientState._reset_state()


test_gradient_accumulation_with_opt_and_scheduler = lowerCAmelCase_  # readable alias for `main`
def lowerCAmelCase_ ( ) -> None:
    '''Interleave iteration over two prepared dataloaders and check that
    `accelerator.gradient_state` always tracks the innermost active one and
    flags `end_of_dataloader` exactly on each loader's last batch.

    Fix: restored the local names the obfuscation left unbound
    (`accelerator`, `first_dataloader`, `second_dataloader`).
    '''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80 )
    first_dataloader = DataLoader(first_dset , batch_size=16 )
    second_dset = RegressionDataset(length=96 )
    second_dataloader = DataLoader(second_dset , batch_size=16 )
    first_dataloader , second_dataloader = accelerator.prepare(first_dataloader , second_dataloader )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader )
        if iteration < len(first_dataloader ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Nest a full pass over the second loader mid-way through the first.
                for batch_num, _ in enumerate(second_dataloader ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader )
                    if batch_num < len(second_dataloader ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


test_dataloader_break = lowerCAmelCase_  # readable alias for `main`
def lowerCAmelCase_ ( ) -> None:
    '''Entry point: run each sync/accumulation test appropriate for the
    current distributed setup, printing a banner from process 0.

    Fix: restored the local names the obfuscation left unbound (`state`,
    `split_batch`, `dispatch_batches`) and re-exported the function under the
    name `main` that the `__main__` guard below actually calls.
    '''
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(accelerator )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(accelerator )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation(split_batch , dispatch_batches )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ ) if False else test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches )


main = lowerCAmelCase_  # readable alias used by the `__main__` guard and the spawn hook below
def lowerCAmelCase_ ( snake_case_ : Dict ) -> int:
    '''Spawn-style per-process entry point: delegates to ``main`` and ignores
    its argument.

    NOTE(review): presumably the ``_mp_fn`` hook used by TPU launchers —
    confirm. ``main`` must be bound at module level for this call to resolve.
    '''
    main()
if __name__ == "__main__":
    # Script entry point when launched directly (e.g. via `accelerate launch`).
    main()
| 1 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import table: submodule name -> public symbols it provides.
# Fix: the obfuscated version assigned every list to a throwaway name and
# then referenced the never-defined `_import_structure` in the `_LazyModule`
# call (NameError on import); the standard pattern is restored here.
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

# Each optional backend registers its symbols only when importable.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
    # At runtime, replace this module with a lazy proxy so heavy backends are
    # only imported when their symbols are first accessed.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 1 | '''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    '''Return gcd(x, y) via the recursive Euclidean algorithm (gcd(x, 0) == x).

    Fix: all three functions in this chunk were named `lowerCAmelCase_`, so
    the recursive call and the cross-references below were unbound NameErrors.
    '''
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    '''Return the least common multiple of x and y using the gcd identity.'''
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    '''Project Euler 5: smallest positive number evenly divisible by 1..n.'''
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


# Backwards-compatible binding for the previous (obfuscated) public name.
lowerCAmelCase_ = solution

if __name__ == "__main__":
    print(f"{solution() = }")
| 1 | 1 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class __A ( UpperCamelCase__ , unittest.TestCase ):
    '''Tokenizer tests for XLMProphetNet, driven by a small SentencePiece fixture.

    NOTE(review): the automated renaming in this file is lossy — the mixin base
    is called `UpperCamelCase__` (presumably `TokenizerTesterMixin`), every test
    method is called `_lowercase` (later defs shadow earlier ones), and locals
    assigned to `UpperCAmelCase_` are then read under their original names
    (`tokenizer`, `vocab_keys`, ...), which are unbound. Confirm against the
    upstream transformers test before running.
    '''

    # Configuration consumed by the tester mixin.
    a__ : Optional[Any] = XLMProphetNetTokenizer
    a__ : Dict = False
    a__ : List[Any] = True
    def _lowercase (self : str ):
        '''Build a fixture tokenizer and save it into the mixin's temp dir.'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        UpperCAmelCase_ = XLMProphetNetTokenizer(__a , keep_accents=__a )
        tokenizer.save_pretrained(self.tmpdirname )
    def _lowercase (self : List[Any] ):
        '''"[PAD]" must round-trip to id 0 and back.'''
        UpperCAmelCase_ = "[PAD]"
        UpperCAmelCase_ = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
    def _lowercase (self : List[str] ):
        '''Spot-check the first/last vocab entries and the vocab length (1012).'''
        UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "[PAD]" )
        self.assertEqual(vocab_keys[1] , "[CLS]" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(__a ) , 1012 )
    def _lowercase (self : Any ):
        '''`vocab_size` must match the fixture model.'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
    def _lowercase (self : Tuple ):
        '''Full tokenize/convert round-trip, including unknown-token handling.'''
        UpperCAmelCase_ = XLMProphetNetTokenizer(__a , keep_accents=__a )
        UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
        self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            __a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__a )
        self.assertListEqual(
            __a , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ] , )
        UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
        self.assertListEqual(
            __a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ] , )
    @cached_property
    def _lowercase (self : Optional[int] ):
        '''Lazily load the full pretrained tokenizer for the slow tests.'''
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
    @slow
    def _lowercase (self : Optional[Any] ):
        '''Encoding "Hello World!" with the pretrained model must be stable.'''
        UpperCAmelCase_ = "Hello World!"
        UpperCAmelCase_ = [35389, 6672, 49, 2]
        self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
    @slow
    def _lowercase (self : int ):
        '''Pinned-revision integration check of input_ids/attention_mask.'''
        # fmt: off
        UpperCAmelCase_ = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__a , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 1 | '''simple docstring'''
import os
from math import logaa
def lowerCAmelCase_ ( data_file: str = "base_exp.txt" ) -> int:
    '''Project Euler 99: return the 1-based line number of the pair "base,exp"
    in `data_file` with the greatest value of base**exp, compared via
    exp * log10(base) to avoid huge integers.

    Fixes: the obfuscated version read unbound names (`largest`, `result`,
    `a`, `x`), relied on the nonexistent `math.logaa`, and leaked the file
    handle; the file is now read under a `with` block using `math.log10`.
    '''
    from math import log10  # local import: the module-level `logaa` alias does not exist in math

    largest = 0.0
    result = 0
    # Note: dirname of the (relative) filename is "", so this resolves in cwd.
    with open(os.path.join(os.path.dirname(data_file), data_file)) as f:
        for i, line in enumerate(f):
            a, x = map(int, line.split(","))
            if x * log10(a) > largest:
                largest = x * log10(a)
                result = i + 1
    return result


solution = lowerCAmelCase_  # readable alias used by the __main__ guard

if __name__ == "__main__":
    print(solution())
| 1 | 1 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    '''True if no already-colored neighbour (adjacency value 1) uses `color`.

    Fix: the three functions in this chunk all shared the obfuscated name
    `lowerCAmelCase_`, so the mutual calls below were unbound NameErrors, and
    the color/backtrack assignments wrote to a throwaway variable instead of
    `colored_vertices[index]`. Restored the working backtracking algorithm.
    '''
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    '''Recursively try to color vertices from `index` on; True on success.'''
    # Base Case: every vertex has been assigned a color.
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    '''Return a valid coloring of `graph` with at most `max_colors` colors,
    or an empty list if none exists.'''
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []


# Backwards-compatible binding for the previous (obfuscated) public name.
lowerCAmelCase_ = color
| 1 | '''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : int ) -> Optional[int]:
    '''Map an LDM/CompVis VAE state dict onto the diffusers AutoencoderKL layout.

    NOTE(review): the automated renaming left this function non-runnable —
    `vae_state_dict`, `down_blocks`, `up_blocks`, `num_up_blocks`,
    `mid_resnets`, `num_mid_res_blocks`, `meta_path` and `new_checkpoint` are
    read but every assignment targets the throwaway name `UpperCAmelCase_`,
    and the `range(snake_case_ )` loops iterate over a function parameter.
    Restore the original local names (cf. diffusers'
    convert_vae_pt_to_diffusers.py) before use.
    '''
    UpperCAmelCase_ = checkpoint
    UpperCAmelCase_ = {}
    # Top-level encoder/decoder conv and norm weights copy over one-to-one.
    UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
    UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
    UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
    UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
    UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
    UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
    UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
    UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
    UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
    UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
    UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
    UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
    UpperCAmelCase_ = vae_state_dict["quant_conv.weight"]
    UpperCAmelCase_ = vae_state_dict["quant_conv.bias"]
    UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
    UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
    UpperCAmelCase_ = {
        layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ )
    }
    # Retrieves the keys for the decoder up blocks only
    UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
    UpperCAmelCase_ = {
        layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ )
    }
    # Remap each encoder down block's resnets (and pop its downsampler conv).
    for i in range(snake_case_ ):
        UpperCAmelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
        if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
            UpperCAmelCase_ = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.weight""" )
            UpperCAmelCase_ = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.bias""" )
        UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
        UpperCAmelCase_ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
        assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
    # Encoder mid block: two resnets followed by one attention layer.
    UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
    UpperCAmelCase_ = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        UpperCAmelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
        UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
        UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
    UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ )
    UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
    conv_attn_to_linear(snake_case_ )
    # Decoder up blocks are stored in reverse order relative to diffusers.
    for i in range(snake_case_ ):
        UpperCAmelCase_ = num_up_blocks - 1 - i
        UpperCAmelCase_ = [
            key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
        ]
        if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
            UpperCAmelCase_ = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.weight"""
            ]
            UpperCAmelCase_ = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.bias"""
            ]
        UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
        UpperCAmelCase_ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
        assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
    # Decoder mid block mirrors the encoder mid block.
    UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
    UpperCAmelCase_ = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        UpperCAmelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
        UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
        UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
    UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ )
    UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
    conv_attn_to_linear(snake_case_ )
    return new_checkpoint
def lowerCAmelCase_ ( vae_pt_path : str , dump_path : str , ) -> None:
    '''Convert a Stable Diffusion VAE checkpoint (``.pt``/``.ckpt`` or
    safetensors) into the diffusers ``AutoencoderKL`` format and save it to
    `dump_path`.

    Fix: the obfuscated original read unbound names (`checkpoint_path`, `r`,
    `original_config`, `checkpoint`, `vae_config`, `device`, ...); restored.
    Positional call sites are unaffected.
    '''
    # Fetch the reference SD v1 inference config (the leading space in the
    # URL is stripped by `requests` when preparing the request).
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 5_12
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if vae_pt_path.endswith("safetensors" ):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(vae_pt_path , framework="pt" , device="cpu" ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(vae_pt_path , map_location=device )["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(dump_path )


vae_pt_to_vae_diffuser = lowerCAmelCase_  # readable alias used by the __main__ block
if __name__ == "__main__":
    # CLI: convert a VAE .pt/.safetensors checkpoint into diffusers format.
    # Fix: the obfuscated version bound the parser and parsed args to a
    # throwaway name, leaving `parser` and `args` undefined below.
    parser = argparse.ArgumentParser()
    parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    args = parser.parse_args()
    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 1 | 1 |
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    '''Return gcd(x, y) via the recursive Euclidean algorithm (gcd(x, 0) == x).

    Fix: all three functions in this chunk were named `lowerCAmelCase_`, so
    the recursive call and the cross-references below were unbound NameErrors.
    '''
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    '''Return the least common multiple of x and y using the gcd identity.'''
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    '''Project Euler 5: smallest positive number evenly divisible by 1..n.'''
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


# Backwards-compatible binding for the previous (obfuscated) public name.
lowerCAmelCase_ = solution

if __name__ == "__main__":
    print(f"{solution() = }")
| 1 | '''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __A ( unittest.TestCase ):
    '''Config/input factory for the Flax ViT tests below (a "model tester").

    NOTE(review): the automated renaming is lossy — every ``__init__``
    parameter is named `__a` and every assignment targets `UpperCAmelCase_`,
    so the references (`parent`, `batch_size`, `config`, `pixel_values`, ...)
    are unbound and the class cannot run as written. Compare with
    `FlaxViTModelTester` in upstream transformers to restore it.
    '''

    def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ):
        # Store the hyperparameters used to build configs and dummy inputs.
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = image_size
        UpperCAmelCase_ = patch_size
        UpperCAmelCase_ = num_channels
        UpperCAmelCase_ = is_training
        UpperCAmelCase_ = use_labels
        UpperCAmelCase_ = hidden_size
        UpperCAmelCase_ = num_hidden_layers
        UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = intermediate_size
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = hidden_dropout_prob
        UpperCAmelCase_ = attention_probs_dropout_prob
        UpperCAmelCase_ = type_sequence_label_size
        UpperCAmelCase_ = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        UpperCAmelCase_ = (image_size // patch_size) ** 2
        UpperCAmelCase_ = num_patches + 1
    def _lowercase (self : Any ):
        '''Build a ViTConfig plus random pixel_values for one test case.'''
        UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase_ = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , )
        return config, pixel_values
    def _lowercase (self : Dict , __a : Any , __a : List[Any] ):
        '''Check FlaxViTModel's last_hidden_state shape (patches + CLS token).'''
        UpperCAmelCase_ = FlaxViTModel(config=__a )
        UpperCAmelCase_ = model(__a )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        UpperCAmelCase_ = (self.image_size, self.image_size)
        UpperCAmelCase_ = (self.patch_size, self.patch_size)
        UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    def _lowercase (self : Tuple , __a : str , __a : Any ):
        '''Check classification logits shape, including a greyscale variant.'''
        UpperCAmelCase_ = self.type_sequence_label_size
        UpperCAmelCase_ = FlaxViTForImageClassification(config=__a )
        UpperCAmelCase_ = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = FlaxViTForImageClassification(__a )
        UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase_ = model(__a )
    def _lowercase (self : Optional[Any] ):
        '''Package (config, {"pixel_values": ...}) for the common test mixin.'''
        UpperCAmelCase_ = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) ,
        ) = config_and_inputs
        UpperCAmelCase_ = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class __A ( UpperCamelCase__ , unittest.TestCase ):
    '''Flax ViT model tests (base model + image classification head).

    NOTE(review): the mixin base is named `UpperCamelCase__` (presumably
    `FlaxModelTesterMixin`) and locals assigned to `UpperCAmelCase_` are read
    under their original names — unbound as written; confirm against the
    upstream transformers test file.
    '''

    # Model classes exercised by the shared mixin tests.
    a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def _lowercase (self : Any ):
        '''Create the model tester and a ConfigTester for ViTConfig.'''
        UpperCAmelCase_ = FlaxViTModelTester(self )
        UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
    def _lowercase (self : Tuple ):
        '''Run the standard config sanity checks.'''
        self.config_tester.run_common_tests()
    def _lowercase (self : str ):
        '''Shape test for the base model.'''
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )
    def _lowercase (self : str ):
        '''Shape test for the classification head.'''
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__a )
    def _lowercase (self : Tuple ):
        '''The first positional argument of __call__ must be `pixel_values`.'''
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ = model_class(__a )
            UpperCAmelCase_ = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ = [*signature.parameters.keys()]
            UpperCAmelCase_ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __a )
    def _lowercase (self : Optional[Any] ):
        '''Outputs must have identical shapes with and without jax.jit.'''
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase_ = self._prepare_for_class(__a , __a )
                UpperCAmelCase_ = model_class(__a )
                @jax.jit
                def model_jitted(__a : Tuple , **__a : List[Any] ):
                    return model(pixel_values=__a , **__a )
                with self.subTest("JIT Enabled" ):
                    UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
                self.assertEqual(len(__a ) , len(__a ) )
                for jitted_output, output in zip(__a , __a ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def _lowercase (self : Tuple ):
        '''Smoke-test loading the pretrained checkpoint and running a forward pass.'''
        for model_class_name in self.all_model_classes:
            UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" )
            UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(__a )
| 1 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build GLUE/MRPC train and eval dataloaders tokenized with bert-base-cased.

    Args:
        accelerator: the `Accelerator` driving this run (used for
            `main_process_first` and mixed-precision/TPU padding decisions).
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)

    Fixes the mangled original: the two module constants shared one name (the
    second shadowed the first) and `tokenizer`/`datasets`/`tokenized_datasets`
    were referenced without ever being bound.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only: swap in mocked dataloaders when the CI flag is set.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Rebind the real builder so training_function picks up the mock
    # (the mangled original assigned to an unused throwaway name instead).
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train and evaluate bert-base-cased on GLUE/MRPC under LocalSGD.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace (mixed_precision, cpu,
            gradient_accumulation_steps, local_sgd_steps).

    Restores the variable bindings destroyed by mechanical renaming; the
    function name matches the call in ``main``.
    """
    # For testing only: shorten the run under the mocked-dataloader CI flag.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
def main():
    """Parse CLI arguments and launch ``training_function``.

    Restores the ``parser``/``args``/``config`` bindings the mangled original
    discarded; the name matches the ``__main__`` entry point below.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 1 | '''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __A(TestCase):
    """Tests for RealmRetriever: builds a tiny WordPiece vocab and dummy block
    records on disk, then exercises retrieve / answer-span detection /
    save-load round trips.

    Fixes the mangled original: the base class ``UpperCamelCase__`` was never
    defined (``TestCase`` is the imported base), all methods shared the name
    ``_lowercase`` (shadowing each other), and local bindings referenced by
    later lines (``retriever``, ``tokenizer``, ``config``, …) were lost.
    """

    def setUp(self):
        # Scratch directory holding the tokenizer vocab and the block records.
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self):
        """Load the RealmTokenizer written by setUp."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        """A minimal RealmConfig pointing at the dummy block records."""
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        """A two-row question/answer Dataset (used by retrieval variants)."""
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        """Six byte-string evidence blocks, as an object-dtype numpy array."""
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        """Retrieving two blocks yields correctly concatenated reader inputs."""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        """Answer spans are located (or marked -1) per retrieved block."""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        """Round-trip the retriever through save_pretrained/from_pretrained,
        both via a local path and via a mocked hub download."""
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 1 | 1 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert gradient (de)synchronisation between two models.

    When ``did_step`` is falsy, corresponding parameter grads must differ;
    when truthy, they must match elementwise. ``iteration`` is only used in
    the failure messages.

    Fixes the mangled original whose four parameters all shared one name
    (a SyntaxError) and restores the name its callers use.
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True):
    """Run one forward/backward MSE step.

    With ``do_backward`` truthy (the default) the backward pass goes through
    ``accelerator.backward``; otherwise the loss is manually scaled by the
    accumulation step count and back-propagated directly — the "ground truth"
    path used by the gradient-accumulation tests.
    """
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Return a seeded regression model, its accelerate-prepared copy and a
    dataloader; with ``sched`` True also return paired optimizers/schedulers.

    Returns:
        ``(model, ddp_model, dataloader)`` when ``sched`` is False, otherwise
        ``(model, opt, scheduler, dataloader, ddp_model, ddp_opt, ddp_sched)``.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        scheduler = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, scheduler, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    """On a single process ``no_sync`` is a no-op: grads always stay in sync."""
    model, ddp_model, dataloader = get_training_setup(accelerator)

    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)

        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    """In distributed mode grads must desync inside ``no_sync`` (even
    iterations) and resync on plain steps (odd iterations)."""
    model, ddp_model, dataloader = get_training_setup(accelerator)

    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)

        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """`accelerator.accumulate` must only sync DDP grads on step boundaries
    (every second batch, or the final batch of the dataloader)."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Gradient accumulation must keep optimizer and scheduler state in
    lockstep between a manually-stepped model and the accelerate-wrapped one
    (learning rates must always agree)."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    """`GradientState` must track nested active dataloaders and flag the end
    of each one, resetting to None once iteration finishes."""
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Nest a full pass over the second dataloader mid-way through
                # the first to exercise the stack behaviour.
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    """Entry point dispatching the sync/gradient-accumulation tests that are
    valid for the current distributed configuration."""
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    """Per-process entry point for TPU spawning (`xla_spawn`); `index` is the
    process rank and is unused."""
    main()


if __name__ == "__main__":
    main()
| 1 | '''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def lowerCAmelCase_(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
    """Return the built-in voltage (in volts) of a p-n junction at temperature ``T``.

    Implements  V_bi = (k*T/q) * ln(N_d * N_a / n_i**2).

    Args:
        donor_conc: donor concentration N_d (must exceed the intrinsic one).
        acceptor_conc: acceptor concentration N_a (must exceed the intrinsic one).
        intrinsic_conc: intrinsic carrier concentration n_i.

    Raises:
        ValueError: if any concentration is non-positive, or a doping
            concentration does not exceed the intrinsic concentration.

    Fixes the mangled original, which referenced the undefined name ``T``
    (the temperature constant had been renamed away) and declared all three
    parameters with the same name.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration")
    else:
        # Boltzmann is in J/K; dividing by the electron-volt energy converts
        # the thermal energy into volts.
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 1 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __A(metaclass=DummyObject):
    """Dummy stand-in used when the `onnx` backend is not installed: every
    entry point raises a helpful error via `requires_backends`.

    Fixes the mangled original, whose metaclass name ``UpperCamelCase__`` is
    undefined in this module (``DummyObject`` is the imported metaclass).
    """

    # Backends this placeholder requires (read by the DummyObject metaclass).
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        # NOTE(review): classmethod names restored from the standard dummy-object
        # template; the mangled source named both `_lowercase` — confirm upstream.
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 1 | '''simple docstring'''
import math
def main() -> None:
    """Interactive CLI: read a message, a column key and a mode, then print
    the columnar-transposition result.

    Restores the ``message``/``key``/``mode``/``text`` bindings destroyed by
    mechanical renaming; the ``__main__`` guard below calls ``main()``.
    """
    message = input("Enter message: ")
    key = int(input(f"""Enter key [2-{len(message) - 1}]: """))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    # NOTE(review): `text` stays unbound if the mode is neither "e" nor "d"
    # (behaviour preserved from the original).
    print(f"""Output:\n{text + "|"}""")
def encrypt_message(key: int, message: str) -> str:
    """Encrypt ``message`` with a columnar transposition cipher of width ``key``.

    Characters are read column by column: column ``c`` collects the characters
    at positions c, c+key, c+2*key, ... of the plaintext.

    Fixes the mangled original, whose two parameters shared one name
    (a SyntaxError); the function name matches its call site in ``main``.
    """
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ = math.ceil(len(snake_case_ ) / key )
UpperCAmelCase_ = key
UpperCAmelCase_ = (num_cols * num_rows) - len(snake_case_ )
UpperCAmelCase_ = [""] * num_cols
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCAmelCase_ = 0
row += 1
return "".join(snake_case_ )
if __name__ == "__main__":
    # Run the module doctests first, then drop into the interactive CLI.
    import doctest

    doctest.testmod()
    main()
| 1 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( UpperCamelCase__ , unittest.TestCase ):
    """Fast (dummy-weight) tests for ``UnCLIPImageVariationPipeline``.

    NOTE(review): this block is machine-mangled and cannot run as-is — every
    local is assigned to ``UpperCAmelCase_`` while later statements reference
    the intended names (``tokenizer``, ``pipe``, ``image_slice``, ...), every
    property/test method is named ``_lowercase`` (so attributes such as
    ``self.time_input_dim`` or ``self.dummy_decoder`` cannot resolve and
    unittest collects nothing), one signature repeats the parameter ``__a``
    (a SyntaxError), and the base ``UpperCamelCase__`` is undefined here
    (presumably ``PipelineTesterMixin``). The original identifiers must be
    restored; comments below describe intent only.
    """

    # pipeline under test plus the parameter sets exercised by the common mixin
    a__ : List[str] = UnCLIPImageVariationPipeline
    a__ : str = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
    a__ : Union[str, Any] = IMAGE_VARIATION_BATCH_PARAMS
    a__ : List[Any] = [
        """generator""",
        """return_dict""",
        """decoder_num_inference_steps""",
        """super_res_num_inference_steps""",
    ]
    a__ : Optional[int] = False

    @property
    def _lowercase (self : List[str] ):
        # hidden size of the dummy text/image embedders — intended name unclear, TODO confirm
        return 32

    @property
    def _lowercase (self : Union[str, Any] ):
        return 32

    @property
    def _lowercase (self : Optional[Any] ):
        return self.time_input_dim

    @property
    def _lowercase (self : int ):
        return self.time_input_dim * 4

    @property
    def _lowercase (self : List[str] ):
        return 100

    @property
    def _lowercase (self : Union[str, Any] ):
        UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer

    @property
    def _lowercase (self : Dict ):
        # deterministic weights for reproducible expected slices below
        torch.manual_seed(0 )
        UpperCAmelCase_ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(__a )

    @property
    def _lowercase (self : List[str] ):
        torch.manual_seed(0 )
        UpperCAmelCase_ = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
        return CLIPVisionModelWithProjection(__a )

    @property
    def _lowercase (self : List[Any] ):
        torch.manual_seed(0 )
        UpperCAmelCase_ = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        UpperCAmelCase_ = UnCLIPTextProjModel(**__a )
        return model

    @property
    def _lowercase (self : str ):
        torch.manual_seed(0 )
        UpperCAmelCase_ = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        UpperCAmelCase_ = UNetaDConditionModel(**__a )
        return model

    @property
    def _lowercase (self : Tuple ):
        # kwargs shared by the two super-resolution unets below
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def _lowercase (self : List[str] ):
        torch.manual_seed(0 )
        UpperCAmelCase_ = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    @property
    def _lowercase (self : List[Any] ):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1 )
        UpperCAmelCase_ = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    def _lowercase (self : int ):
        # assembles every dummy component into the kwargs dict the pipeline expects
        UpperCAmelCase_ = self.dummy_decoder
        UpperCAmelCase_ = self.dummy_text_proj
        UpperCAmelCase_ = self.dummy_text_encoder
        UpperCAmelCase_ = self.dummy_tokenizer
        UpperCAmelCase_ = self.dummy_super_res_first
        UpperCAmelCase_ = self.dummy_super_res_last
        UpperCAmelCase_ = UnCLIPScheduler(
            variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=1000 , )
        UpperCAmelCase_ = UnCLIPScheduler(
            variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=1000 , )
        UpperCAmelCase_ = CLIPImageProcessor(crop_size=32 , size=32 )
        UpperCAmelCase_ = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    # NOTE(review): duplicate parameter name `__a` below is a SyntaxError —
    # presumably (device, seed=0, pil_image=True); see `pil_image` use in body.
    def _lowercase (self : Union[str, Any] , __a : Tuple , __a : Optional[int]=0 , __a : int=True ):
        UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
        if str(__a ).startswith("mps" ):
            UpperCAmelCase_ = torch.manual_seed(__a )
        else:
            UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(__a )
        if pil_image:
            # map [-1, 1] float tensor to a PIL image
            UpperCAmelCase_ = input_image * 0.5 + 0.5
            UpperCAmelCase_ = input_image.clamp(0 , 1 )
            UpperCAmelCase_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            UpperCAmelCase_ = DiffusionPipeline.numpy_to_pil(__a )[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    def _lowercase (self : Tuple ):
        # end-to-end run on CPU, compares a 3x3 output slice against fixed values
        UpperCAmelCase_ = "cpu"
        UpperCAmelCase_ = self.get_dummy_components()
        UpperCAmelCase_ = self.pipeline_class(**__a )
        UpperCAmelCase_ = pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
        UpperCAmelCase_ = pipe(**__a )
        UpperCAmelCase_ = output.images
        UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
        UpperCAmelCase_ = pipe(
            **__a , return_dict=__a , )[0]
        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase_ = np.array(
            [
                0.99_97,
                0.00_02,
                0.99_97,
                0.99_97,
                0.99_69,
                0.00_23,
                0.99_97,
                0.99_69,
                0.99_70,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def _lowercase (self : Any ):
        # same as above but (presumably) with the other pil_image flag value
        UpperCAmelCase_ = "cpu"
        UpperCAmelCase_ = self.get_dummy_components()
        UpperCAmelCase_ = self.pipeline_class(**__a )
        UpperCAmelCase_ = pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
        UpperCAmelCase_ = pipe(**__a )
        UpperCAmelCase_ = output.images
        UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
        UpperCAmelCase_ = pipe(
            **__a , return_dict=__a , )[0]
        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase_ = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def _lowercase (self : List[str] ):
        # batched (list-of-images) input path
        UpperCAmelCase_ = "cpu"
        UpperCAmelCase_ = self.get_dummy_components()
        UpperCAmelCase_ = self.pipeline_class(**__a )
        UpperCAmelCase_ = pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
        UpperCAmelCase_ = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]
        UpperCAmelCase_ = pipe(**__a )
        UpperCAmelCase_ = output.images
        UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
        UpperCAmelCase_ = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]
        UpperCAmelCase_ = pipe(
            **__a , return_dict=__a , )[0]
        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 64, 64, 3)
        UpperCAmelCase_ = np.array(
            [
                0.99_97,
                0.99_89,
                0.00_08,
                0.00_21,
                0.99_60,
                0.00_18,
                0.00_14,
                0.00_02,
                0.99_33,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def _lowercase (self : Dict ):
        # checks that passing pre-computed latents / image embeddings matches the
        # image-input path exactly
        UpperCAmelCase_ = torch.device("cpu" )

        class __A :
            # stand-in scheduler exposing only the attribute prepare_latents reads
            # (presumably init_noise_sigma — TODO confirm intended attribute name)
            a__ : Tuple = 1

        UpperCAmelCase_ = self.get_dummy_components()
        UpperCAmelCase_ = self.pipeline_class(**__a )
        UpperCAmelCase_ = pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
        UpperCAmelCase_ = pipe.decoder.dtype
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        UpperCAmelCase_ = pipe.prepare_latents(
            __a , dtype=__a , device=__a , generator=__a , latents=__a , scheduler=DummyScheduler() )
        UpperCAmelCase_ = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        UpperCAmelCase_ = pipe.prepare_latents(
            __a , dtype=__a , device=__a , generator=__a , latents=__a , scheduler=DummyScheduler() )
        UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
        UpperCAmelCase_ = pipe(
            **__a , decoder_latents=__a , super_res_latents=__a ).images
        UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
        # Don't pass image, instead pass embedding
        UpperCAmelCase_ = pipeline_inputs.pop("image" )
        UpperCAmelCase_ = pipe.image_encoder(__a ).image_embeds
        UpperCAmelCase_ = pipe(
            **__a , decoder_latents=__a , super_res_latents=__a , image_embeddings=__a , ).images
        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_a - img_out_a ).max() < 1E-4

    @skip_mps
    def _lowercase (self : Optional[int] ):
        UpperCAmelCase_ = torch_device == "cpu"
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        UpperCAmelCase_ = 1E-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=__a , expected_max_diff=__a )

    @skip_mps
    def _lowercase (self : Tuple ):
        UpperCAmelCase_ = torch_device == "cpu"
        UpperCAmelCase_ = True
        UpperCAmelCase_ = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=__a , relax_max_difference=__a , additional_params_copy_to_batched_inputs=__a , )

    def _lowercase (self : Dict ):
        UpperCAmelCase_ = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            UpperCAmelCase_ = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=__a , additional_params_copy_to_batched_inputs=__a , )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=__a )

    @skip_mps
    def _lowercase (self : int ):
        # mixin test is flaky on MPS, delegate to the base implementation elsewhere
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def _lowercase (self : str ):
        return super().test_save_load_local()

    @skip_mps
    def _lowercase (self : List[str] ):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
    """Slow GPU integration test: karlo-v1-alpha image variation vs. a reference image.

    NOTE(review): machine-mangled — locals are assigned to ``UpperCAmelCase_``
    but referenced as ``pipeline``/``output``/``image``; both methods are named
    ``_lowercase`` so ``tearDown`` never runs and nothing is collected as a
    test; ``torch.floataa`` is not a real dtype (presumably ``torch.float16``).
    Restore the original identifiers before running.
    """

    def _lowercase (self : Union[str, Any] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowercase (self : Optional[Any] ):
        # input image and the expected fp16 reference output
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
        UpperCAmelCase_ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
        UpperCAmelCase_ = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.floataa )
        UpperCAmelCase_ = pipeline.to(__a )
        pipeline.set_progress_bar_config(disable=__a )
        UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
        UpperCAmelCase_ = pipeline(
            __a , generator=__a , output_type="np" , )
        UpperCAmelCase_ = output.images[0]
        assert image.shape == (256, 256, 3)
        # mean absolute pixel difference tolerance of 15 (uint8 scale)
        assert_mean_pixel_difference(__a , __a , 15 )
| 1 | '''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
# Route DEBUG-level logs to stdout so subprocess output is visible in CI.
# Fix: the logger/handler were assigned to mangled names while line 4 below
# references `logger` and `stream_handler` — restore the referenced names.
logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( TestCasePlus ):
    """End-to-end smoke tests for the RAG finetuning example script.

    Fixes: the two helper methods were both defined as ``_lowercase`` while the
    tests call ``self._create_dummy_data`` / ``self._run_finetune``; locals were
    assigned to a mangled name while later lines reference ``contents``,
    ``testargs``, ``result``, etc.; the base class reference was undefined
    (``TestCasePlus`` is what the ``get_auto_remove_tmp_dir``/``get_env`` calls
    require). Identifiers restored accordingly.
    """

    def _create_dummy_data(self, data_dir: str):
        """Write tiny train/val/test `source`/`target` files under `data_dir`."""
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"""{split}.{field}""" ), "w" ) as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        """Run finetune_rag.py in a subprocess and return the parsed metrics.json.

        Raises whatever `execute_subprocess_async` raises if the child fails.
        """
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)
        # whitespace inside the f-string is irrelevant — .split() tokenizes it
        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        """.split()
        if gpus > 0:
            testargs.append(f"""--gpus={gpus}""")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            # CPU fallback: two DDP-CPU workers
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 1 | 1 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Optimize ``search_prob`` (a SearchProblem-like object) by simulated annealing.

    Fixes: the signature was mangled (every parameter named ``snake_case_`` — a
    SyntaxError) while the body references the names restored here; the function
    name is restored from the ``simulated_annealing(...)`` call sites in the
    ``__main__`` block below.

    Args:
        search_prob: start state; must expose ``score()``, ``get_neighbors()``
            and ``x``/``y`` coordinates.
        find_max: maximize the score when True, minimize when False.
        max_x/min_x/max_y/min_y: rectangular bounds; neighbors outside are skipped.
        visualization: plot score-vs-iteration with matplotlib when True.
        start_temperate: initial temperature.
        rate_of_decrease: fractional temperature decay per iteration.
        threshold_temp: stop once temperature drops below this.

    Returns:
        The best state visited (by raw score — note this tracks the *maximum*
        raw score even when minimizing, preserving the original behavior).
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_fa(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    def test_fa(x, y):
        # redefinition is intentional: the remaining probes optimize this function
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        f"{local_min.score()}"
    )
| 1 | '''simple docstring'''
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
# (name restored: the workers reference it via `global process_lock`)
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe, n_passes=10):
    """Worker owning one array slot in parallel odd-even transposition sort.

    Args:
        position: index of this worker's slot in the array.
        value: the element this worker starts with.
        l_send / r_send: pipes used to send to the left / right neighbor
            (None at the ends of the array).
        lr_cv / rr_cv: pipes used to receive from the left / right neighbor.
        result_pipe: pipe through which the final value is reported to main.
        n_passes: number of compare-exchange phases to run. Defaults to 10 for
            backward compatibility; n passes are required to sort n elements,
            so the coordinator passes len(arr).
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, n_passes):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort `arr` in place using one process per element; returns `arr`.

    Fixes: the three functions in this module were all defined under one
    mangled name (shadowing each other) and locals did not match their later
    references; the previously hard-coded 10 phases now scale with len(arr),
    so lists longer than 10 elements also sort correctly.
    """
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    n_passes = len(arr)  # n phases guarantee an n-element list is sorted

    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0], n_passes),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i], n_passes),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
                n_passes,
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    """Demo: sort a reversed list of ten numbers and print before/after."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
| 1 | 1 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# NOTE(review): the four assignment targets below were destroyed by name
# mangling; restored to the standard transformers compatibility shim, which the
# surrounding comment describes — confirm against the upstream conversion script.
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """Convert a Transformer-XL TF checkpoint and/or pre-processed corpus to PyTorch.

    Fixes: the def name is restored from the call in the ``__main__`` block
    below; the signature was mangled (four parameters all named ``snake_case_``
    — a SyntaxError) and every local was assigned to a mangled name while later
    lines reference ``corpus``, ``config``, ``model``, etc. Parameter order
    matches the positional call site.

    Args:
        tf_checkpoint_path: optional TF checkpoint to convert (skipped if falsy).
        transfo_xl_config_file: optional model config JSON ("" -> default config).
        pytorch_dump_folder_path: destination folder for all outputs.
        transfo_xl_dataset_file: optional pickled corpus to convert (skipped if falsy).
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"""Save vocabulary to {pytorch_vocab_dump_path}""")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"""Save dataset to {pytorch_dataset_dump_path}""")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"""Building PyTorch model from configuration: {config}""")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    # Fix: parser/args were assigned to mangled names while every following
    # line references `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the folder to store the PyTorch model or dataset/vocab.',
    )
    parser.add_argument(
        '--tf_checkpoint_path',
        default='',
        type=str,
        help='An optional path to a TensorFlow checkpoint path to be converted.',
    )
    parser.add_argument(
        '--transfo_xl_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained BERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--transfo_xl_dataset_file',
        default='',
        type=str,
        help='An optional dataset file to be converted in a vocabulary.',
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 1 | '''simple docstring'''
def lowerCAmelCase_(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a '0b'-prefixed
    binary string.

    Fixes the mangled signature (both parameters were named ``snake_case_`` — a
    SyntaxError — while the body uses ``a`` and ``b``) and replaces the manual
    zfill/zip bit-joining with the equivalent ``bin(a | b)``: OR-ing the
    integers is identical to OR-ing the zero-padded bit strings, and the result
    has the same width because bit_length(a | b) == max(bit_length) for a, b >= 0.

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    return bin(a | b)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 1 | 1 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Project Euler 114: count the ways a row of `length` units can be filled
    with red blocks of minimum length 3, any two blocks separated by at least
    one black unit (the all-black row counts as one way).

    Fixes: the def name is restored from the ``solution()`` call in the guard
    below, and the parameter (mangled) is restored to ``length``, which the
    body references.

    Uses dynamic programming: ways_number[r] accumulates, for each admissible
    block length and start position, the arrangements of the remainder of the row.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                # block + its mandatory black separator consume
                # block_start + block_length + 1 units; the rest recurses
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # the block flush against the right edge (no separator needed)
            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
| 1 | '''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place using slowsort (multiply-and-
    surrender; deliberately inefficient, for teaching purposes).

    Fixes: the def name is restored from the recursive call sites in the body,
    and the mangled signature (three parameters all named ``snake_case_`` — a
    SyntaxError) is restored to the names the body references.

    Args:
        sequence: list to sort (mutated in place; returns None).
        start: first index of the range, defaults to 0.
        end: last index of the range (inclusive), defaults to len(sequence) - 1.
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    # recursively place the maxima of both halves at `mid` and `end` ...
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    # ... move the larger of the two to the end of the range ...
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    # ... then sort everything before it.
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 1 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
SCREAMING_SNAKE_CASE_: Dict ='Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def get_user_input():
    """Interactively prompt for a training-environment config.

    Fixes: the def name is restored from the ``get_user_input()`` call in
    ``config_command`` below; locals were mangled while the ``if`` references
    ``compute_environment``.

    Returns:
        The config object built by the SageMaker or cluster questionnaire.
    """
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    """Build the argument parser for `accelerate config`.

    Fixes: the def name is restored from the call in ``main`` below, and the
    mangled parameter is restored to ``subparsers``, which the body references.

    Args:
        subparsers: optional argparse sub-parser collection to attach to; when
            None a standalone parser is created.

    Returns:
        The configured ``argparse.ArgumentParser``.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        # sub-command dispatch: `accelerate config ...` routes to config_command
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    """Run the interactive questionnaire and save the resulting config file.

    Fixes: the def name is restored from the call in ``main`` below, and the
    mangled parameter is restored to ``args``, which the body references.

    Args:
        args: parsed CLI namespace; ``args.config_file`` optionally overrides
            the default save location.
    """
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    # format follows the chosen extension; YAML is the default
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"""accelerate configuration saved at {config_file}""")
def main():
    """CLI entry point: parse arguments and run the config questionnaire.

    Fixes: the def name is restored from the ``main()`` call in the guard, and
    mangled locals are restored to the names the body references.
    """
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 1 | '''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __A ( BertTokenizationTest ):
    """DistilBERT tokenizer tests — reuses the full BERT tokenization suite and
    adds a special-tokens sequence-builder check.

    Fixes: the three class attributes were all named ``a__`` (shadowing each
    other) — restored to the names the Bert test harness reads; the base class
    reference was undefined (``BertTokenizationTest`` is the imported base);
    locals were mangled while the asserts reference ``text``/``text_a``/etc.;
    the test method is renamed from ``_lowercase`` so unittest collects it.
    """

    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        # single sequence: [CLS] text [SEP]; pair: [CLS] a [SEP] b [SEP]
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 1 | 1 |
'''simple docstring'''
# sample graphs (adjacency lists); names restored — both constants were
# assigned to the same mangled name, the second silently shadowing the first
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Return the vertices reachable from `vert` in DFS finish (post) order.

    Name restored from the call sites in the entry function below — all three
    functions in this module were defined under one mangled name, shadowing
    each other. `visited` is shared across calls and mutated in place.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(
    reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]
) -> list[int]:
    """Collect every unvisited vertex reachable from `vert` in the reversed graph.

    Name restored from the call site in the entry function below. Together with
    the finish order this yields one strongly connected component.
    """
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def lowerCAmelCase_(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the strongly connected components of `graph` (Kosaraju's algorithm).

    (Intended name is presumably ``strongly_connected_components``; the mangled
    name is kept because it is what external callers of this module resolve.)

    Assumes vertices are 0..len(graph)-1.
    """
    visited = len(graph) * [False]
    # build the transpose graph
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    # first pass: DFS finish order over the original graph
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    # second pass: DFS the transpose in reverse finish order; each tree is an SCC
    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
| 1 | '''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()

# Module-level logger; the conversion function below calls `logger.info(...)`,
# so the obfuscated binding is restored to that name.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE(review): the obfuscated copy initialised the list under the wrong name
# while appending to `rename_keys` (NameError); restored, and the repetitive
# per-layer appends are generated from data tables producing the same entries.
rename_keys = []

# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
_ENCODER_LAYER_RENAMES = [
    ("self_attn.out_proj", "self_attn.out_proj"),
    ("linear1", "fc1"),
    ("linear2", "fc2"),
    ("norm1", "self_attn_layer_norm"),
    ("norm2", "final_layer_norm"),
]
# decoder layers: 2 times output projection, 2 feedforward neural networks and
# 3 layernorms, plus the q, k, v content/position projections in self/cross
# attention for conditional DETR (ca_qpos_proj exists only in decoder layer 0
# and is handled in the extend() below)
_DECODER_LAYER_RENAMES = [
    ("self_attn.out_proj", "self_attn.out_proj"),
    ("cross_attn.out_proj", "encoder_attn.out_proj"),
    ("linear1", "fc1"),
    ("linear2", "fc2"),
    ("norm1", "self_attn_layer_norm"),
    ("norm2", "encoder_attn_layer_norm"),
    ("norm3", "final_layer_norm"),
    ("sa_qcontent_proj", "sa_qcontent_proj"),
    ("sa_kcontent_proj", "sa_kcontent_proj"),
    ("sa_qpos_proj", "sa_qpos_proj"),
    ("sa_kpos_proj", "sa_kpos_proj"),
    ("sa_v_proj", "sa_v_proj"),
    ("ca_qcontent_proj", "ca_qcontent_proj"),
    ("ca_kcontent_proj", "ca_kcontent_proj"),
    ("ca_kpos_proj", "ca_kpos_proj"),
    ("ca_v_proj", "ca_v_proj"),
    ("ca_qpos_sine_proj", "ca_qpos_sine_proj"),
]
for i in range(6):
    for _src, _dest in _ENCODER_LAYER_RENAMES:
        for _param in ("weight", "bias"):
            rename_keys.append(
                (f"transformer.encoder.layers.{i}.{_src}.{_param}", f"encoder.layers.{i}.{_dest}.{_param}")
            )
    for _src, _dest in _DECODER_LAYER_RENAMES:
        for _param in ("weight", "bias"):
            rename_keys.append(
                (f"transformer.decoder.layers.{i}.{_src}.{_param}", f"decoder.layers.{i}.{_dest}.{_param}")
            )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def rename_key(state_dict: dict, old: str, new: str) -> None:
    """Move `state_dict[old]` to `state_dict[new]` in place."""
    # NOTE(review): restored from the obfuscated copy; the name matches the
    # `rename_key(...)` call site in the conversion function below.
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a copy of `state_dict` with the original backbone key prefix
    `backbone.0.body` renamed to the HuggingFace `backbone.conv_encoder.model`.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each encoder layer's fused attention input projection into
    separate q/k/v projection weights and biases, in place.

    NOTE(review): the destination key names were destroyed by obfuscation and
    are restored from the upstream conversion script; 256 is the model's
    hidden size, so q/k/v each take one 256-row slice of the 768-row fusion.
    """
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """Download and return the standard COCO test image (two cats on a couch)
    used to sanity-check the conversion.
    """
    # NOTE(review): obfuscation had replaced both the url argument and
    # `stream=True` with an undefined name; restored.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Dict ) -> Optional[int]:
    '''
    Convert a DeppMeng/ConditionalDETR torch-hub checkpoint named `model_name`
    into the HuggingFace ConditionalDetr format, verify that the converted
    model's outputs match the original, push it to the hub and save model plus
    image processor under `pytorch_dump_folder_path`.

    NOTE(review): obfuscation in this copy destroyed the two parameter names
    (presumably `model_name` and `pytorch_dump_folder_path` — the def line has
    duplicate parameters and does not parse), every local assignment target
    (all collapsed to `UpperCAmelCase_`) and most call arguments (collapsed to
    `snake_case_`). The code is kept byte-identical here; restore the names
    from the upstream conversion script before running.
    '''
    UpperCAmelCase_ = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        UpperCAmelCase_ = "resnet101"
    if "dc5" in model_name:
        UpperCAmelCase_ = True
    UpperCAmelCase_ = "panoptic" in model_name
    if is_panoptic:
        # panoptic checkpoints use 250 labels; detection uses the 91 COCO classes
        UpperCAmelCase_ = 2_50
    else:
        UpperCAmelCase_ = 91
        UpperCAmelCase_ = "huggingface/label-files"
        UpperCAmelCase_ = "coco-detection-id2label.json"
        UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
        UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()}
        UpperCAmelCase_ = idalabel
        UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
    # load image processor
    UpperCAmelCase_ = "coco_panoptic" if is_panoptic else "coco_detection"
    UpperCAmelCase_ = ConditionalDetrImageProcessor(format=snake_case_ )
    # prepare image
    UpperCAmelCase_ = prepare_img()
    UpperCAmelCase_ = image_processor(images=snake_case_ , return_tensors="pt" )
    UpperCAmelCase_ = encoding["pixel_values"]
    logger.info(f"""Converting model {model_name}...""" )
    # load original model from torch hub
    UpperCAmelCase_ = torch.hub.load("DeppMeng/ConditionalDETR" , snake_case_ , pretrained=snake_case_ ).eval()
    UpperCAmelCase_ = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            # panoptic checkpoints nest the detector under a "conditional_detr." prefix
            UpperCAmelCase_ = "conditional_detr." + src
        rename_key(snake_case_ , snake_case_ , snake_case_ )
    UpperCAmelCase_ = rename_backbone_keys(snake_case_ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(snake_case_ , is_panoptic=snake_case_ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    UpperCAmelCase_ = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                # segmentation-head weights already use the final naming scheme
                continue
            else:
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
    # finally, create HuggingFace model and load state dict
    UpperCAmelCase_ = ConditionalDetrForSegmentation(snake_case_ ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case_ )
    model.load_state_dict(snake_case_ )
    model.eval()
    model.push_to_hub(repo_id=snake_case_ , organization="DepuMeng" , commit_message="Add model" )
    # verify our conversion
    UpperCAmelCase_ = conditional_detr(snake_case_ )
    UpperCAmelCase_ = model(snake_case_ )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
    # Save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
    model.save_pretrained(snake_case_ )
    image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
    # NOTE(review): obfuscation bound the parser and parsed args to the wrong
    # names while the code below uses `parser`/`args`; restored.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 1 | 1 |
'''simple docstring'''
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of the non-negative integer `num`."""
    # NOTE(review): restored from the obfuscated copy; the name matches the
    # `sum_digits(...)` call site in `solution` below.
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    """Project Euler 65: return the digit sum of the numerator of the
    `max_n`-th convergent of the continued fraction of e.
    """
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        # continued-fraction terms of e after the leading 2: 1, 2, 1, 1, 4, 1, 1, 6, ...
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
| 1 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    """Deprecated alias of `CLIPImageProcessor`, kept for backward compatibility.

    NOTE(review): the obfuscated copy passed its first positional argument as
    the warning category and had lost the base-class name; restored to
    `FutureWarning` and `CLIPImageProcessor` (the only name imported above),
    with the class name taken from the deprecation message itself.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 1 | 1 |
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return `number`'s multiplication table with `number_of_terms` lines,
    one ``number * i = product`` equation per line.
    """
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
| 1 | '''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    """A binary tree node holding `data` and optional left/right children.

    NOTE(review): class name restored from the `TreeNode(...)` call sites in
    `build_tree` below.
    """

    def __init__(self, data: int) -> None:
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    """Interactively build a binary tree level by level from user input.

    Entering "n" (or nothing) for any child stops input and returns the root.
    """
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    # unreachable in practice (the loop always returns); satisfies the
    # declared non-optional return type
    raise
def pre_order(node: TreeNode) -> None:
    """Print the tree rooted at `node` in pre-order (root, left, right),
    comma-separated; silently ignores non-TreeNode input.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode) -> None:
    """Print the tree rooted at `node` in in-order (left, root, right),
    comma-separated; silently ignores non-TreeNode input.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)
def post_order(node: TreeNode) -> None:
    """Print the tree rooted at `node` in post-order (left, right, root),
    comma-separated; silently ignores non-TreeNode input.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    """Print the tree rooted at `node` breadth-first, comma-separated;
    silently ignores non-TreeNode input.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    """Print the tree rooted at `node` breadth-first with one line per level;
    silently ignores non-TreeNode input.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        # drain the current level, collecting the next level's nodes
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order print of the tree rooted at `node` using an
    explicit stack; silently ignores non-TreeNode input.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order print of the tree rooted at `node` using an
    explicit stack; silently ignores non-TreeNode input.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order print of the tree rooted at `node` using two
    stacks; silently ignores non-TreeNode input.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stackb.append(n)
    while stackb:  # pop up from stack2 will be the post order
        print(stackb.pop().data, end=",")
def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    """Return `s` centered in a banner of `width` `char`s; with no text,
    return a newline followed by a full-width rule.
    """
    if not s:
        return "\n" + width * char
    # split the remaining width around the text, extra char on the right
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    # NOTE(review): the obfuscated copy bound the built tree to the wrong
    # name while every traversal call uses `node`; restored.
    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
| 1 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both assignments bind the same obfuscated name, so the empty
# dict immediately overwrites the logger; these were presumably `logger` and
# a pretrained-config archive map — confirm against the original module.
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ={}
class LlamaConfig(PretrainedConfig):
    """Configuration for LLaMA models (stores vocabulary, layer sizes, norm
    epsilon, RoPE scaling, etc.).

    NOTE(review): the obfuscated copy had duplicate `__a` parameter names
    (SyntaxError) and lost every attribute-assignment target; restored from
    the annotated defaults in the original signature. The class attribute
    names follow the `PretrainedConfig` convention (`model_type`,
    `keys_to_ignore_at_inference`).
    """

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate that `rope_scaling`, if set, is a dict with a supported
        `type` and a float `factor` > 1; raise ValueError otherwise."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 1 | '''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    """Masked-language-model prediction pipeline (the ``fill-mask`` task).

    NOTE(review): the obfuscated copy had every method named `_lowercase`,
    duplicate `__a` parameters (SyntaxError) and destroyed assignment targets;
    names restored per the standard pipeline contract
    (`preprocess`/`_forward`/`postprocess`/`_sanitize_parameters`).
    """

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        """Return the positions of the tokenizer's mask token in `input_ids`."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> None:
        """Raise a PipelineException when `input_ids` contains no mask token."""
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        """Validate that every sample in `model_inputs` contains a mask token."""
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        """Tokenize `inputs` and check each sample contains a mask token."""
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model, keeping the input ids around for postprocessing."""
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        """Turn logits into the top-k predictions per mask position."""
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Map target words to vocabulary ids, tokenizing out-of-vocab targets."""
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        return np.array(target_ids)

    def _sanitize_parameters(self, top_k=None, targets=None):
        """Split pipeline kwargs into preprocess/forward/postprocess params."""
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        """Fill the masked token(s) in `inputs`; unwrap single-sample results."""
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
| 1 | 1 |