PM-AI committed on
Commit
a2dc602
1 Parent(s): 86268f9

Update germandpr-beir.py

Browse files
Files changed (1) hide show
  1. germandpr-beir.py +56 -36
germandpr-beir.py CHANGED
@@ -1,4 +1,5 @@
1
  import json
 
2
 
3
  import datasets
4
 
@@ -6,67 +7,73 @@ import datasets
6
  _VERSION = "1.0.0"
7
 
8
  _DESCRIPTION = "Deepset's germanDPR dataset made compatible with BEIR benchmark framework. One version contains " \
9
- "the original dataset 1:1 and the other dataset is preprocessed. See official dataset card for " \
10
- "usage of dataset with BEIR."
11
 
12
- _SUBSETS = ["queries-original", "corpus-original", "queries-processed", "corpus-processed", "qrels"]
 
13
 
14
 
15
  class GermanDPRBeir(datasets.GeneratorBasedBuilder):
16
  BUILDER_CONFIGS = (
17
  [
18
  datasets.BuilderConfig(
19
- name="queries-original",
20
- description=f"BEIR queries created 1:1 from deepset/germanDPR.",
21
  version=_VERSION,
22
  ),
23
  datasets.BuilderConfig(
24
- name="corpus-original",
25
- description=f"BEIR corpus created 1:1 from deepset/germanDPR.",
26
  version=_VERSION,
27
  ),
28
  datasets.BuilderConfig(
29
- name="queries-processed",
30
- description=f"BEIR queries created and further text-processed from deepset/germanDPR.",
31
  version=_VERSION,
32
  ),
33
  datasets.BuilderConfig(
34
- name="corpus-processed",
35
- description=f"BEIR corpus created and further text-processed from deepset/germanDPR.",
36
  version=_VERSION,
37
  ),
38
  datasets.BuilderConfig(
39
- name="qrels",
40
- description=f"BEIR qrels created from deepset/germanDPR for train and test split.",
 
 
 
 
 
41
  version=_VERSION,
42
  )
43
  ]
44
  )
45
 
46
- DEFAULT_CONFIG_NAME = "qrels"
47
 
48
  def _info(self):
49
  name = self.config.name
50
- _SPLITS = ["queries-original", "corpus-original", "queries-processed", "corpus-processed", "qrels"]
51
-
52
- if name.startswith("queries"):
53
  features = {
54
  "_id": datasets.Value("string"),
55
  "text": datasets.Value("string")
56
  }
57
- elif name.startswith("corpus"):
58
  features = {
59
  "_id": datasets.Value("string"),
60
  "title": datasets.Value("string"),
61
  "text": datasets.Value("string"),
62
  }
63
- else:
64
  # name == qrels
65
  features = {
66
  "query-id": datasets.Value("string"),
67
  "corpus-id": datasets.Value("string"),
68
  "score": datasets.Value("int32")
69
  }
 
 
70
 
71
  return datasets.DatasetInfo(
72
  description=f"{_DESCRIPTION}\n{self.config.description}",
@@ -77,26 +84,38 @@ class GermanDPRBeir(datasets.GeneratorBasedBuilder):
77
 
78
  def _split_generators(self, dl_manager):
79
  """Returns SplitGenerators."""
80
- _SPLITS = ["queries-original", "corpus-original", "queries-processed", "corpus-processed", "qrels"]
81
-
82
  name = self.config.name
83
- if name == "qrels":
84
- dl_path = dl_manager.download([
85
- "https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/qrels/train.tsv",
86
- "https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/qrels/test.tsv"
87
- ])
 
 
 
 
88
  return [
89
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path[0]}),
90
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": dl_path[1]})
 
 
 
 
91
  ]
92
- else:
93
- dl_path = dl_manager.download(f"https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/{name}.jsonl")
94
  return [
95
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path}),
96
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": dl_path})
 
 
 
 
97
  ]
 
 
98
 
99
  def _generate_queries_data(self, filepath):
 
100
  with open(filepath, "r", encoding="utf-8") as in_file:
101
  for idx, line in enumerate(in_file):
102
  data = json.loads(line)
@@ -120,10 +139,11 @@ class GermanDPRBeir(datasets.GeneratorBasedBuilder):
120
  def _generate_examples(self, filepath):
121
  """Yields examples."""
122
  name = self.config.name
123
- if name.startswith("queries"):
124
  return self._generate_queries_data(filepath)
125
- elif name.startswith("corpus"):
126
  return self._generate_corpus_data(filepath)
127
- else:
128
- # name == qrels
129
  return self._generate_qrel_data(filepath)
 
 
 
1
  import json
2
+ import os.path
3
 
4
  import datasets
5
 
 
7
  _VERSION = "1.0.0"
8
 
9
  _DESCRIPTION = "Deepset's germanDPR dataset made compatible with BEIR benchmark framework. One version contains " \
10
+ "the original dataset 1:1 (but deduplicated) and the other dataset is furhter preprocessed. " \
11
+ "See official dataset card for dataset usage with BEIR."
12
 
13
+ _SUBSETS = ["original-queries", "original-corpus", "original-qrels",
14
+ "processed-queries", "processed-corpus", "original-qrels"]
15
 
16
 
17
class GermanDPRBeir(datasets.GeneratorBasedBuilder):
    """BEIR-compatible loader for deepset/germanDPR (original and processed variants)."""

    # One BuilderConfig per subset; the (name, description) pairs below carry
    # exactly the same text as the hand-written configs they replace.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=subset_name, description=subset_description, version=_VERSION)
        for subset_name, subset_description in [
            ("original-queries",
             "BEIR queries created 1:1 but deduplicated from deepset/germanDPR."),
            ("original-corpus",
             "BEIR corpus created 1:1 but deduplicated from deepset/germanDPR."),
            ("original-qrels",
             "BEIR qrels for original version of deepset/germanDPR."),
            ("processed-queries",
             "BEIR queries created, deduplicated and further text-processed from deepset/germanDPR."),
            ("processed-corpus",
             "BEIR corpus created, deduplicated and further text-processed from deepset/germanDPR."),
            ("processed-qrels",
             "BEIR qrels for processed version of deepset/germanDPR."),
        ]
    ]

    # Used when the caller does not request a specific config ("original-queries").
    DEFAULT_CONFIG_NAME = _SUBSETS[0]
54
 
55
  def _info(self):
56
  name = self.config.name
57
+ if name.endswith("queries"):
 
 
58
  features = {
59
  "_id": datasets.Value("string"),
60
  "text": datasets.Value("string")
61
  }
62
+ elif name.endswith("corpus"):
63
  features = {
64
  "_id": datasets.Value("string"),
65
  "title": datasets.Value("string"),
66
  "text": datasets.Value("string"),
67
  }
68
+ elif name.endswith("qrels"):
69
  # name == qrels
70
  features = {
71
  "query-id": datasets.Value("string"),
72
  "corpus-id": datasets.Value("string"),
73
  "score": datasets.Value("int32")
74
  }
75
+ else:
76
+ raise ValueError(f'Unknown subset, choose from: {", ".join(_SUBSETS)}')
77
 
78
  return datasets.DatasetInfo(
79
  description=f"{_DESCRIPTION}\n{self.config.description}",
 
84
 
85
  def _split_generators(self, dl_manager):
86
  """Returns SplitGenerators."""
 
 
87
  name = self.config.name
88
+ if name.startswith("original"):
89
+ dl_path = dl_manager.download_and_extract("https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/original.tar.gz")
90
+ elif name.startswith("processed"):
91
+ dl_path = dl_manager.download_and_extract("https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/processed.tar.gz")
92
+ else:
93
+ raise ValueError(f'Unknown subset, choose from: {", ".join(_SUBSETS)}')
94
+
95
+ type1, type2 = name.split("-")
96
+ if type2 in ["corpus", "queries"]:
97
  return [
98
+ datasets.SplitGenerator(
99
+ name=datasets.Split.TRAIN,
100
+ gen_kwargs={"filepath": os.path.join(dl_path, f'{type1}/train/{type2}.jsonl')}),
101
+ datasets.SplitGenerator(
102
+ name=datasets.Split.TEST,
103
+ gen_kwargs={"filepath": os.path.join(dl_path, f'{type1}/test/{type2}.jsonl')})
104
  ]
105
+ elif type2 == "qrels":
 
106
  return [
107
+ datasets.SplitGenerator(
108
+ name=datasets.Split.TRAIN,
109
+ gen_kwargs={"filepath": os.path.join(dl_path, f'{type1}/train/qrels/train.tsv')}),
110
+ datasets.SplitGenerator(
111
+ name=datasets.Split.TEST,
112
+ gen_kwargs={"filepath": os.path.join(dl_path, f'{type1}/test/qrels/test.tsv')})
113
  ]
114
+ else:
115
+ raise ValueError(f'Unknown subset, choose from: {", ".join(_SUBSETS)}')
116
 
117
  def _generate_queries_data(self, filepath):
118
+ print("filepath: ", filepath)
119
  with open(filepath, "r", encoding="utf-8") as in_file:
120
  for idx, line in enumerate(in_file):
121
  data = json.loads(line)
 
139
  def _generate_examples(self, filepath):
140
  """Yields examples."""
141
  name = self.config.name
142
+ if name.endswith("queries"):
143
  return self._generate_queries_data(filepath)
144
+ elif name.endswith("corpus"):
145
  return self._generate_corpus_data(filepath)
146
+ elif name.endswith("qrels"):
 
147
  return self._generate_qrel_data(filepath)
148
+ else:
149
+ raise ValueError(f'Unknown subset, choose from: {", ".join(_SUBSETS)}')