Divyansh2992 committed
Commit 9533470 · verified · 1 Parent(s): a5d931a

Upload folder using huggingface_hub

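The commit message states the files were pushed with huggingface_hub. A minimal sketch of such an upload, assuming the standard HfApi.upload_folder call; the local folder path and repo_id below are placeholders, not values taken from this commit:

from huggingface_hub import HfApi

api = HfApi()  # authenticates via a saved token (huggingface-cli login) or HF_TOKEN
api.upload_folder(
    folder_path="./indictrans2-en-indic-finetuned",  # placeholder: local folder holding the files in this diff
    repo_id="Divyansh2992/indictrans2-en-indic",     # placeholder repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)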
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ model.SRC filter=lfs diff=lfs merge=lfs -text
+ model.TGT filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "activation_dropout": 0.0,
+   "activation_function": "gelu",
+   "architectures": [
+     "IndicTransForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "attn_implementation": null,
+   "auto_map": {
+     "AutoConfig": "ai4bharat/indictrans2-en-indic-1B--configuration_indictrans.IndicTransConfig",
+     "AutoModelForSeq2SeqLM": "ai4bharat/indictrans2-en-indic-1B--modeling_indictrans.IndicTransForConditionalGeneration"
+   },
+   "bos_token_id": 0,
+   "decoder_attention_heads": 16,
+   "decoder_embed_dim": 1024,
+   "decoder_ffn_dim": 8192,
+   "decoder_layerdrop": 0,
+   "decoder_layers": 18,
+   "decoder_normalize_before": true,
+   "decoder_start_token_id": 2,
+   "decoder_vocab_size": 122672,
+   "dropout": 0.2,
+   "encoder_attention_heads": 16,
+   "encoder_embed_dim": 1024,
+   "encoder_ffn_dim": 8192,
+   "encoder_layerdrop": 0,
+   "encoder_layers": 18,
+   "encoder_normalize_before": true,
+   "encoder_vocab_size": 32322,
+   "eos_token_id": 2,
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "layernorm_embedding": false,
+   "max_source_positions": 256,
+   "max_target_positions": 256,
+   "model_type": "IndicTrans",
+   "num_hidden_layers": 18,
+   "pad_token_id": 1,
+   "scale_embedding": true,
+   "share_decoder_input_output_embed": false,
+   "tokenizer_class": "IndicTransTokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.51.3",
+   "use_cache": true,
+   "vocab_size": 122672
+ }
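Since config.json routes AutoConfig and AutoModelForSeq2SeqLM through custom IndicTrans classes hosted in ai4bharat/indictrans2-en-indic-1B (the auto_map entries above), loading this checkpoint requires trust_remote_code=True. A minimal loading sketch; the repo id is a placeholder for wherever this folder lives on the Hub:

from transformers import AutoConfig, AutoModelForSeq2SeqLM

repo = "Divyansh2992/indictrans2-en-indic"  # placeholder repo id
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
print(config.model_type, config.encoder_layers, config.decoder_layers)  # IndicTrans 18 18

# Downloads model.safetensors (~4.5 GB) and instantiates the custom seq2seq architecture.
model = AutoModelForSeq2SeqLM.from_pretrained(repo, trust_remote_code=True)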
dict.SRC.json ADDED
The diff for this file is too large to render. See raw diff
 
dict.TGT.json ADDED
The diff for this file is too large to render. See raw diff
 
generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "decoder_start_token_id": 2,
+   "eos_token_id": 2,
+   "pad_token_id": 1,
+   "transformers_version": "4.51.3"
+ }
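generation_config.json only pins the special-token ids (bos=0, pad=1, eos=2, decoder start=2), which generate() reads automatically; they can also be inspected directly. A short sketch using the same placeholder repo id as above:

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("Divyansh2992/indictrans2-en-indic")  # placeholder repo id
print(gen_cfg.bos_token_id, gen_cfg.pad_token_id, gen_cfg.eos_token_id)  # 0 1 2
print(gen_cfg.decoder_start_token_id)                                    # 2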
model.SRC ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3cedc5cbcc740369b76201942a0f096fec7287fee039b55bdb956f301235b914
+ size 759425
model.TGT ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac9257c8e76b8b607705b959cc3d075656ea33032f7a974e467b8941df6e98d4
+ size 3256903
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35d28fe035cd6ac026536b555558b07762425c8b930670219063e4fc3666c96d
+ size 4462265272
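model.SRC, model.TGT, and model.safetensors are committed as Git LFS pointers; the blobs they reference can be fetched with huggingface_hub and checked against the oid/size recorded in the pointers. A sketch, again with a placeholder repo id:

import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="Divyansh2992/indictrans2-en-indic", filename="model.safetensors")  # placeholder repo id

# Hash in chunks; the ~4.4 GB file should not be read into memory at once.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(h.hexdigest())  # expected: 35d28fe035cd6ac026536b555558b07762425c8b930670219063e4fc3666c96d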
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "auto_map": {
+     "AutoTokenizer": [
+       "ai4bharat/indictrans2-en-indic-1B--tokenization_indictrans.IndicTransTokenizer",
+       null
+     ]
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": false,
+   "eos_token": "</s>",
+   "extra_special_tokens": {},
+   "model_max_length": 256,
+   "pad_token": "<pad>",
+   "src_vocab_file": "C:\\Users\\Divyansh\\.cache\\huggingface\\hub\\models--ai4bharat--indictrans2-en-indic-1B\\snapshots\\10e65a9951a1e922cd109a95e8aba9357b62144b\\dict.SRC.json",
+   "tgt_vocab_file": "C:\\Users\\Divyansh\\.cache\\huggingface\\hub\\models--ai4bharat--indictrans2-en-indic-1B\\snapshots\\10e65a9951a1e922cd109a95e8aba9357b62144b\\dict.TGT.json",
+   "tokenizer_class": "IndicTransTokenizer",
+   "unk_token": "<unk>"
+ }
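Note that src_vocab_file and tgt_vocab_file above still point at absolute paths in the uploader's local Windows Hugging Face cache. When the tokenizer is loaded from the Hub, the custom IndicTransTokenizer (resolved through the auto_map entry) is assumed here to fall back to the dict.SRC.json, dict.TGT.json, model.SRC, and model.TGT files shipped in this repo, since that resolution logic lives in the remote code. A minimal loading sketch with a placeholder repo id:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Divyansh2992/indictrans2-en-indic", trust_remote_code=True)  # placeholder repo id
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)  # <s> </s> <pad> <unk>
print(tok.model_max_length)                                        # 256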