Add AutoTokenizer & Sentence Transformers support

#1
opened by tomaarsen (Hugging Face staff)
1_Pooling/config.json CHANGED
@@ -1,10 +1,9 @@
 {
-  "word_embedding_dimension": 768,
-  "pooling_mode_cls_token": false,
-  "pooling_mode_mean_tokens": true,
-  "pooling_mode_max_tokens": false,
-  "pooling_mode_mean_sqrt_len_tokens": false,
-  "pooling_mode_weightedmean_tokens": false,
-  "pooling_mode_lasttoken": false
-}
-
+  "word_embedding_dimension": 768,
+  "pooling_mode_cls_token": false,
+  "pooling_mode_mean_tokens": true,
+  "pooling_mode_max_tokens": false,
+  "pooling_mode_mean_sqrt_len_tokens": false,
+  "pooling_mode_weightedmean_tokens": false,
+  "pooling_mode_lasttoken": false
+}
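The pooling configuration above selects plain mean pooling over 768-dimensional token embeddings. As a rough sketch (not part of this PR), this is how the same configuration maps onto the `sentence_transformers.models.Pooling` constructor:

```python
from sentence_transformers.models import Pooling

# Mirror of 1_Pooling/config.json: mean pooling over 768-dim token embeddings.
pooling = Pooling(
    word_embedding_dimension=768,
    pooling_mode_cls_token=False,
    pooling_mode_mean_tokens=True,
    pooling_mode_max_tokens=False,
    pooling_mode_mean_sqrt_len_tokens=False,
    pooling_mode_weightedmean_tokens=False,
    pooling_mode_lasttoken=False,
)
print(pooling.get_pooling_mode_str())  # "mean"
```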
 
README.md CHANGED
@@ -1,6 +1,10 @@
 ---
+library_name: sentence-transformers
+pipeline_tag: sentence-similarity
 tags:
+- feature-extraction
+- sentence-similarity
 - mteb
 model-index:
 - name: epoch_0_model
   results:
@@ -2655,6 +2659,17 @@ Training data to train the models is released in its entirety. For more details,
 
 ## Usage
 
+### Sentence Transformers
+```python
+from sentence_transformers import SentenceTransformer
+
+model = SentenceTransformer("../nomic-embed-text-v1", trust_remote_code=True)
+sentences = ['What is TSNE?', 'Who is Laurens van der Maaten?']
+embeddings = model.encode(sentences)
+print(embeddings)
+```
+
+### Transformers
 
 ```python
 import torch
@@ -2669,7 +2684,8 @@ def mean_pooling(model_output, attention_mask):
 sentences = ['What is TSNE?', 'Who is Laurens van der Maaten?']
 
 tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
-model = AutoModel.from_pretrained('nomic-ai/nomic-embed-text-v1-unsupervised', trust_remote_code=True)
+model = AutoModel.from_pretrained('nomic-ai/nomic-embed-text-v1', trust_remote_code=True)
+model.eval()
 
 encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
 
@@ -2688,8 +2704,8 @@ The model natively supports scaling of the sequence length past 2048 tokens. To
 + tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', model_max_length=8192)
 
 
-- model = AutoModel.from_pretrained('nomic-ai/nomic-embed-text-v1-unsupervised', trust_remote_code=True)
-+ model = AutoModel.from_pretrained('nomic-ai/nomic-embed-text-v1-unsupervised', trust_remote_code=True, rotary_scaling_factor=2)
+- model = AutoModel.from_pretrained('nomic-ai/nomic-embed-text-v1', trust_remote_code=True)
++ model = AutoModel.from_pretrained('nomic-ai/nomic-embed-text-v1', trust_remote_code=True, rotary_scaling_factor=2)
 ```
 
 # Join the Nomic Community
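The updated README now documents two loading paths for the same model. As a sanity check (not part of this PR, and assuming the published repo id `nomic-ai/nomic-embed-text-v1-unsupervised`), both paths should agree once mean pooling and L2 normalization are applied manually on the Transformers side:

```python
import torch
import torch.nn.functional as F
from sentence_transformers import SentenceTransformer
from transformers import AutoModel, AutoTokenizer

sentences = ['What is TSNE?', 'Who is Laurens van der Maaten?']

# Path 1: Sentence Transformers (Transformer -> mean Pooling -> Normalize, per modules.json below).
st_model = SentenceTransformer('nomic-ai/nomic-embed-text-v1-unsupervised', trust_remote_code=True)
st_emb = torch.from_numpy(st_model.encode(sentences))

# Path 2: plain Transformers with manual mean pooling and normalization.
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = AutoModel.from_pretrained('nomic-ai/nomic-embed-text-v1-unsupervised', trust_remote_code=True)
model.eval()

encoded = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
with torch.no_grad():
    token_emb = model(**encoded)[0]                        # (batch, seq_len, hidden)
mask = encoded['attention_mask'].unsqueeze(-1).float()
hf_emb = (token_emb * mask).sum(dim=1) / mask.sum(dim=1)   # mean pooling over real tokens
hf_emb = F.normalize(hf_emb, p=2, dim=1)

# The two embeddings should match up to numerical tolerance.
print((st_emb - hf_emb).abs().max())
```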
config.json CHANGED
@@ -12,7 +12,7 @@
   "bos_token_id": null,
   "causal": false,
   "dense_seq_output": true,
-  "embd_pdrop": 0.1,
+  "embd_pdrop": 0.0,
   "eos_token_id": null,
   "fused_bias_fc": true,
   "fused_dropout_add_ln": true,
@@ -32,7 +32,7 @@
   "prenorm": false,
   "qkv_proj_bias": false,
   "reorder_and_upcast_attn": false,
-  "resid_pdrop": 0.1,
+  "resid_pdrop": 0.0,
   "rotary_emb_base": 1000,
   "rotary_emb_fraction": 1.0,
   "rotary_emb_interleaved": false,
@@ -41,7 +41,7 @@
   "scale_attn_by_inverse_layer_idx": false,
   "scale_attn_weights": true,
   "summary_activation": null,
-  "summary_first_dropout": 0.1,
+  "summary_first_dropout": 0.0,
   "summary_proj_to_labels": true,
   "summary_type": "cls_index",
   "summary_use_proj": true,
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
+{
+  "__version__": {
+    "sentence_transformers": "2.4.0.dev0",
+    "transformers": "4.37.2",
+    "pytorch": "2.1.0+cu121"
+  }
+}
modeling_hf_nomic_bert.py CHANGED
@@ -1069,6 +1069,7 @@ class NomicBertModel(NomicBertPreTrainedModel):
         position_ids=None,
         token_type_ids=None,
         attention_mask=None,
+        return_dict=None,
     ):
         if token_type_ids is None:
             token_type_ids = torch.zeros_like(input_ids)
@@ -1080,7 +1081,7 @@ class NomicBertModel(NomicBertPreTrainedModel):
 
         attention_mask = self.get_extended_attention_mask(attention_mask, input_ids.shape)
         sequence_output = self.encoder(
-            hidden_states, attention_mask=attention_mask
+            hidden_states, attention_mask=attention_mask, return_dict=return_dict,
         )
 
         pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
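The modeling change adds an optional `return_dict` argument to `NomicBertModel.forward` and forwards it to the encoder. This matters because generic callers, including the Sentence Transformers `Transformer` module, pass `return_dict` explicitly; without the new parameter that call would fail with an unexpected-keyword error. A small sketch of the call pattern this enables (repo id assumed, not part of this PR):

```python
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = AutoModel.from_pretrained('nomic-ai/nomic-embed-text-v1-unsupervised', trust_remote_code=True)
model.eval()

batch = tokenizer(['What is TSNE?'], return_tensors='pt')
with torch.no_grad():
    # Accepted only after this PR; previously forward() had no return_dict parameter.
    outputs = model(**batch, return_dict=False)
print(type(outputs))
```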
modules.json ADDED
@@ -0,0 +1,20 @@
+[
+  {
+    "idx": 0,
+    "name": "0",
+    "path": "",
+    "type": "sentence_transformers.models.Transformer"
+  },
+  {
+    "idx": 1,
+    "name": "1",
+    "path": "1_Pooling",
+    "type": "sentence_transformers.models.Pooling"
+  },
+  {
+    "idx": 2,
+    "name": "2",
+    "path": "2_Normalize",
+    "type": "sentence_transformers.models.Normalize"
+  }
+]
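`modules.json` declares the three-stage pipeline (Transformer, then Pooling, then Normalize) that `SentenceTransformer(...)` assembles when this repo is loaded. For illustration only, roughly the same pipeline can be built by hand; the repo id is assumed here and exact keyword handling may differ across sentence-transformers versions:

```python
from sentence_transformers import SentenceTransformer, models

# Stage 0: the NomicBert encoder itself (path "" in modules.json).
transformer = models.Transformer(
    'nomic-ai/nomic-embed-text-v1-unsupervised',
    max_seq_length=8192,
    model_args={'trust_remote_code': True},
)
# Stage 1: mean pooling (path "1_Pooling").
pooling = models.Pooling(transformer.get_word_embedding_dimension(), pooling_mode='mean')
# Stage 2: L2 normalization (path "2_Normalize").
normalize = models.Normalize()

model = SentenceTransformer(modules=[transformer, pooling, normalize])
print(model)
```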
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+{
+  "max_seq_length": 8192,
+  "do_lower_case": false
+}
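`sentence_bert_config.json` pins the Sentence Transformers sequence length to the model's full 8192-token context. The value is exposed as `model.max_seq_length` and can be lowered at runtime, for example to trade long-document support for speed and memory (repo id assumed, not part of this PR):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('nomic-ai/nomic-embed-text-v1-unsupervised', trust_remote_code=True)
print(model.max_seq_length)  # 8192, taken from sentence_bert_config.json

# Inputs longer than max_seq_length are truncated; lowering it reduces memory use.
model.max_seq_length = 2048
```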
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 8192,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff
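With `tokenizer.json`, `tokenizer_config.json`, `special_tokens_map.json`, and `vocab.txt` now in the repo, `AutoTokenizer` can be loaded directly from the model id instead of from `bert-base-uncased`, and it picks up `model_max_length=8192` automatically. A short sketch (repo id assumed, not part of this PR):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('nomic-ai/nomic-embed-text-v1-unsupervised')
print(tokenizer.model_max_length)  # 8192, from tokenizer_config.json

encoded = tokenizer(['What is TSNE?'], padding=True, truncation=True, return_tensors='pt')
print(encoded['input_ids'].shape)
```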