Samoed and KennethEnevoldsen committed
Commit
16d7a28
1 parent: aaf5428

fix models path (#25)

* fix models path

* fix test

* add last models

* fix CI test

* try tests

* add comment

* remove __init__

* Update results.py

Co-authored-by: Kenneth Enevoldsen <[email protected]>

* upd paths

* just one results

* remove comment

* upd paths

* move tests

* remove os

* move refresh paths

* remove text-embedding-preview-0409

Revert "rename"

This reverts commit e252add3a54d8380b30f425b04c80ad6d5780ea4.

rename

* allow test to fail

* allow test to fail

---------

Co-authored-by: Kenneth Enevoldsen <[email protected]>

.gitignore CHANGED
@@ -1,5 +1,6 @@
 # python
 __pycache__
+.venv
 
 # vscode
 .vscode/
paths.json CHANGED
The diff for this file is too large to render. See raw diff
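Although the paths.json diff is too large to render, its shape follows from get_paths() in results.py below: a JSON object mapping each model name to the list of its per-task result files. A hypothetical fragment (the model name, revision, and paths are illustrative only, not taken from the actual file):

# Illustrative shape of paths.json; real keys and paths come from the results tree.
paths = {
    "GritLM-7B": [
        "results/GritLM-7B/<revision>/SomeTask.json",  # hypothetical entry
    ],
}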
 
results.py CHANGED
@@ -55,7 +55,6 @@ MODELS = [
     "DanskBERT",
     "FollowIR-7B",
     "GritLM-7B",
-    "GritLM-7B-noinstruct",
     "LASER2",
     "LLM2Vec-Llama-2-supervised",
     "LLM2Vec-Llama-2-unsupervised",
@@ -106,7 +105,6 @@ MODELS = [
     "contriever-base-msmarco",
     "cross-en-de-roberta-sentence-transformer",
     "dfm-encoder-large-v1",
-    "dfm-sentence-encoder-large-1",
     "distilbert-base-25lang-cased",
     "distilbert-base-en-fr-cased",
     "distilbert-base-en-fr-es-pt-it-cased",
@@ -129,6 +127,7 @@ MODELS = [
     "elser-v2",
     "embedder-100p",
     "facebook-dpr-ctx_encoder-multiset-base",
+    "facebookdragon-plus-context-encoder",
     "flan-t5-base",
     "flan-t5-large",
     "flaubert_base_cased",
@@ -193,14 +192,18 @@ MODELS = [
     "sentence-t5-large",
     "sentence-t5-xl",
     "sentence-t5-xxl",
-    "sentence-transformers__LaBSE",
-    "sentence-transformers__all-MiniLM-L12-v2",
-    "sentence-transformers__all-MiniLM-L6-v2",
-    "sentence-transformers__all-mpnet-base-v2",
-    "sentence-transformers__paraphrase-multilingual-MiniLM-L12-v2",
-    "sentence-transformers__paraphrase-multilingual-mpnet-base-v2",
+    "all-MiniLM-L12-v2",
     "sgpt-bloom-1b7-nli",
     "sgpt-bloom-7b1-msmarco",
+    "SGPT-125M-weightedmean-nli-bitfit",
+    "SGPT-1.3B-weightedmean-msmarco-specb-bitfit",
+    "SGPT-5.8B-weightedmean-msmarco-specb-bitfit-que",
+    "SGPT-5.8B-weightedmean-msmarco-specb-bitfit",
+    "SGPT-5.8B-weightedmean-nli-bitfit",
+    "SGPT-2.7B-weightedmean-msmarco-specb-bitfit",
+    "SGPT-125M-weightedmean-msmarco-specb-bitfit-que",
+    "SGPT-125M-weightedmean-msmarco-specb-bitfit-doc",
+    "SGPT-125M-weightedmean-msmarco-specb-bitfit",
     "silver-retriever-base-v1",
     "st-polish-paraphrase-from-distilroberta",
     "st-polish-paraphrase-from-mpnet",
@@ -254,6 +257,13 @@ MODELS = [
 ]
 
 
+def get_model_for_current_dir(dir_name: str) -> str | None:
+    for model in MODELS:
+        if model == dir_name or ("__" in dir_name and dir_name.split("__")[1] == model):
+            return model
+    return None
+
+
 # Needs to be run whenever new files are added
 def get_paths():
     import collections, json, os
@@ -263,13 +273,17 @@ def get_paths():
         if not os.path.isdir(results_model_dir):
             print(f"Skipping {results_model_dir}")
             continue
+        model_name = get_model_for_current_dir(model_dir)
+        if model_name is None:
+            print(f"Skipping {model_dir} model dir")
+            continue
         for revision_folder in os.listdir(results_model_dir):
             if not os.path.isdir(os.path.join(results_model_dir, revision_folder)):
                 continue
             for res_file in os.listdir(os.path.join(results_model_dir, revision_folder)):
                 if (res_file.endswith(".json")) and not(res_file.endswith(("overall_results.json", "model_meta.json"))):
                     results_model_file = os.path.join(results_model_dir, revision_folder, res_file)
-                    files[model_dir].append(results_model_file)
+                    files[model_name].append(results_model_file)
     with open("paths.json", "w") as f:
         json.dump(files, f, indent=2)
     return files
@@ -305,7 +319,6 @@ class MTEBResults(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         path_file = dl_manager.download_and_extract(URL)
-        # Local debugging:
         with open(path_file) as f:
             files = json.load(f)
         downloaded_files = dl_manager.download_and_extract(files[self.config.name])
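For readers skimming the diff: the new get_model_for_current_dir helper is what lets org-prefixed result folders (e.g. sentence-transformers__all-MiniLM-L12-v2) resolve to the bare model names kept in MODELS, which is why the sentence-transformers__* entries could be dropped from the list. A minimal sketch of the matching rule, using a hypothetical one-entry MODELS list:

MODELS = ["all-MiniLM-L12-v2"]  # excerpt for illustration; the full list lives in results.py

def get_model_for_current_dir(dir_name: str) -> str | None:
    # Match either the bare model name or an "org__model" folder name.
    for model in MODELS:
        if model == dir_name or ("__" in dir_name and dir_name.split("__")[1] == model):
            return model
    return None

assert get_model_for_current_dir("all-MiniLM-L12-v2") == "all-MiniLM-L12-v2"
assert get_model_for_current_dir("sentence-transformers__all-MiniLM-L12-v2") == "all-MiniLM-L12-v2"
assert get_model_for_current_dir("some-unlisted-model") is None  # such dirs are skipped by get_paths()

And since the comment above notes that get_paths() must be re-run whenever new result files are added, the maintenance step is a sketch like the following (assuming the datasets library is installed, since results.py imports it at module scope, and the script is run from the repository root):

from results import get_paths

# Rewrites paths.json in place and returns the {model_name: [result files]} mapping.
files = get_paths()
print(f"Indexed {len(files)} models")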
tests/__init__.py ADDED
(empty file)
tests/test_load_datasets.py ADDED
@@ -0,0 +1,12 @@
+from pathlib import Path
+import pytest
+from datasets import load_dataset
+from results import MODELS
+
+
+@pytest.mark.parametrize("model", MODELS)
+@pytest.mark.xfail(reason="If new model added this test will fail")
+def test_load_results_from_datasets(model):
+    """Ensures that all models can be imported from dataset"""
+    path = Path(__file__).parent.parent / "results.py"
+    ds = load_dataset(str(path.absolute()), model, trust_remote_code=True)
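A quick way to spot-check one config outside pytest, mirroring what the new test does (a sketch; assumes the datasets library is installed and the working directory is the repository root):

from datasets import load_dataset

# Same call the parametrized test makes, for one known config from MODELS.
ds = load_dataset("results.py", "GritLM-7B", trust_remote_code=True)
print(ds)  # shows the splits loaded for this model's results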
tests/test_load_results.py CHANGED
@@ -23,4 +23,4 @@ def test_load_results():
     known_model = "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
     known_revision = "bf3bf13ab40c3157080a7ab344c831b9ad18b5eb"
     assert known_model in results
-    assert known_revision in results[known_model]
+    assert known_revision in results[known_model]