hatianzhang committed
Commit da70fe9
1 Parent(s): 45d08bb
splade_query/handler.py → handler.py RENAMED
@@ -3,40 +3,54 @@ from transformers import AutoTokenizer, AutoModelForMaskedLM
 import torch
 from subprocess import run
 
+
 # set device
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
+# set path
+query_emb_model_path = "/splade_query"
+doc_emb_model_path = "/splade_doc"
+
 
 class EndpointHandler():
     def __init__(self, path=""):
-        # self.pipeline = pipeline("text-classification", model=path)
-        # self.holidays = holidays.US()
-        self.query_model = AutoModelForMaskedLM.from_pretrained(path).to(device)
-        self.query_tokenizer = AutoTokenizer.from_pretrained(path)
+        self.query_model = AutoModelForMaskedLM.from_pretrained(path+query_emb_model_path).to(device)
+        self.query_tokenizer = AutoTokenizer.from_pretrained(path+query_emb_model_path)
+        self.doc_model = AutoModelForMaskedLM.from_pretrained(path+doc_emb_model_path).to(device)
+        self.doc_tokenizer = AutoTokenizer.from_pretrained(path+doc_emb_model_path)
 
 
     def __call__(self, data: Dict[str, Any]) -> Tuple[List[List[int]], List[List[float]]]:
         """
-        data args:
-            inputs (:obj: `str`)
-            date (:obj: `str`)
-        Return:
+        data args:
+            inputs (:obj: `List[str]`)
+            task (:obj: `str`)
+        Return:
             A :obj:`list` | `dict`: will be serialized and returned
         """
         # get inputs
         texts = data.pop("inputs", data)
-
-        tokens = self.query_tokenizer(
+        task = data.pop("task", data)
+        emb_model = None
+        tokenizer = None
+
+        if task == "query_emb":
+            emb_model = self.query_model
+            tokenizer = self.query_tokenizer
+        elif task == "doc_emb":
+            emb_model = self.doc_model
+            tokenizer = self.doc_tokenizer
+        else:
+            raise ValueError("task must be either 'query_emb' or 'doc_emb'")
+
+        tokens = tokenizer(
             texts, truncation=True, padding=True, return_tensors="pt"
         )
 
-        tokens = self.query_tokenizer(
-            texts, truncation=True, padding=True, return_tensors="pt"
-        )
         if torch.cuda.is_available():
             tokens = tokens.to("cuda")
 
-        output = self.query_model(**tokens)
+        output = emb_model(**tokens)
         logits, attention_mask = output.logits, tokens.attention_mask
         relu_log = torch.log(1 + torch.relu(logits))
         weighted_log = relu_log * attention_mask.unsqueeze(-1)
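After the rename, handler.py serves both encoders behind a single `task` switch. Below is a minimal, hypothetical smoke test of the updated handler (not part of the commit); the sample texts and the repository-root path are illustrative assumptions, and it presumes the `splade_query/` and `splade_doc/` directories sit under that root as laid out in this commit.

```python
# Hypothetical local check of the reworked EndpointHandler (not part of the commit).
from handler import EndpointHandler

handler = EndpointHandler(path=".")  # resolves "./splade_query" and "./splade_doc"

# Query-side sparse encoding
query_out = handler({"inputs": ["what is a splade model"], "task": "query_emb"})

# Document-side sparse encoding
doc_out = handler({"inputs": ["SPLADE learns sparse lexical representations."], "task": "doc_emb"})

# Any other task value raises: ValueError("task must be either 'query_emb' or 'doc_emb'")
```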
splade_doc/README.md ADDED
@@ -0,0 +1,43 @@
+ ---
+ license: cc-by-nc-sa-4.0
+ language: "en"
+ tags:
+ - splade
+ - query-expansion
+ - document-expansion
+ - bag-of-words
+ - passage-retrieval
+ - knowledge-distillation
+ - document encoder
+ datasets:
+ - ms_marco
+ ---
+ ## Efficient SPLADE
+ Efficient SPLADE model for passage retrieval. This architecture uses two distinct models for query and document inference. This is the **doc** one, please also download the **query** one (https://huggingface.co/naver/efficient-splade-VI-BT-large-query). For additional details, please visit:
+ * paper: https://dl.acm.org/doi/10.1145/3477495.3531833
+ * code: https://github.com/naver/splade
+ | | MRR@10 (MS MARCO dev) | R@1000 (MS MARCO dev) | Latency (PISA) ms | Latency (Inference) ms |
+ | --- | --- | --- | --- | --- |
+ | `naver/efficient-splade-V-large` | 38.8 | 98.0 | 29.0 | 45.3 |
+ | `naver/efficient-splade-VI-BT-large` | 38.0 | 97.8 | 31.1 | 0.7 |
+ ## Citation
+ If you use our checkpoint, please cite our work:
+ ```
+ @inproceedings{10.1145/3477495.3531833,
+ author = {Lassance, Carlos and Clinchant, St\'{e}phane},
+ title = {An Efficiency Study for SPLADE Models},
+ year = {2022},
+ isbn = {9781450387323},
+ publisher = {Association for Computing Machinery},
+ address = {New York, NY, USA},
+ url = {https://doi.org/10.1145/3477495.3531833},
+ doi = {10.1145/3477495.3531833},
+ abstract = {Latency and efficiency issues are often overlooked when evaluating IR models based on Pretrained Language Models (PLMs) in reason of multiple hardware and software testing scenarios. Nevertheless, efficiency is an important part of such systems and should not be overlooked. In this paper, we focus on improving the efficiency of the SPLADE model since it has achieved state-of-the-art zero-shot performance and competitive results on TREC collections. SPLADE efficiency can be controlled via a regularization factor, but solely controlling this regularization has been shown to not be efficient enough. In order to reduce the latency gap between SPLADE and traditional retrieval systems, we propose several techniques including L1 regularization for queries, a separation of document/query encoders, a FLOPS-regularized middle-training, and the use of faster query encoders. Our benchmark demonstrates that we can drastically improve the efficiency of these models while increasing the performance metrics on in-domain data. To our knowledge, we propose the first neural models that, under the same computing constraints, achieve similar latency (less than 4ms difference) as traditional BM25, while having similar performance (less than 10% MRR@10 reduction) as the state-of-the-art single-stage neural rankers on in-domain data.},
+ booktitle = {Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval},
+ pages = {2220–2226},
+ numpages = {7},
+ keywords = {splade, latency, information retrieval, sparse representations},
+ location = {Madrid, Spain},
+ series = {SIGIR '22}
+ }
+ ```
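The README above describes the split query/document architecture. As a hedged sketch of using the document encoder added in this commit outside the endpoint, the snippet below loads the local `splade_doc` weights with `transformers` and mirrors the term-weighting in `handler.py`; the final max-pooling over token positions is assumed from the standard SPLADE formulation rather than taken from this commit.

```python
# Sketch only: the local path comes from this commit; the max-pooling step is an
# assumption based on the usual SPLADE recipe, not code from handler.py.
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("splade_doc")
model = AutoModelForMaskedLM.from_pretrained("splade_doc")

tokens = tokenizer(
    ["SPLADE is a sparse neural retriever."],
    truncation=True, padding=True, return_tensors="pt",
)
with torch.no_grad():
    logits = model(**tokens).logits                       # (batch, seq_len, vocab_size)

relu_log = torch.log(1 + torch.relu(logits))              # log-saturated activations
weighted = relu_log * tokens.attention_mask.unsqueeze(-1) # zero out padding positions
sparse_rep, _ = weighted.max(dim=1)                       # (batch, vocab_size) term weights
print(int((sparse_rep > 0).sum()), "non-zero vocabulary terms")
```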
splade_doc/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "_name_or_path": "/tmp-network/user/classanc/CoCodenser/flops_mlm_together/10_epochs/distilbert_256_64_0.001/",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForMaskedLM"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.11.3",
+   "vocab_size": 30522
+ }
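For a quick sanity check of the DistilBERT hyperparameters in this config, it can be loaded directly with `transformers` (a small sketch; the relative path assumes the repository root as the working directory):

```python
# Inspect the configuration added as splade_doc/config.json.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("splade_doc")
print(cfg.model_type, cfg.n_layers, cfg.n_heads, cfg.dim, cfg.vocab_size)
# -> distilbert 6 12 768 30522 (values from the config above)
```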
splade_doc/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee43cec6d21979da10d6520dbe1705a251debe690878ccebe2d6e9e9c3b0930e
+ size 267982639
splade_doc/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
splade_doc/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
splade_doc/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "never_split": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "/tmp-network/user/classanc/CoCodenser/flops_mlm_together/10_epochs/distilbert_256_64_0.001/", "tokenizer_class": "DistilBertTokenizer"}
splade_doc/vocab.txt ADDED
The diff for this file is too large to render. See raw diff