Felix committed on
Commit e3e784a
1 Parent(s): 1d799ca

changed URL to GitHub and added 3 new datasets (swedn, swefracas, swediagnostics)
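For context, a minimal usage sketch of how the new subsets could be loaded through the `datasets` library, assuming the script is still served from the `sbx/superlim-2` dataset repo (the repo the old `_URL` pointed at) and that a `BuilderConfig` is registered for each name; this diff only adds configs for `argumentation_sent` and `swefracas`, so the other names are assumptions:

```python
from datasets import load_dataset

# Hypothetical sketch -- repo id taken from the old _URL in superlim-2.py.
# Each config name below needs a matching BuilderConfig in the script;
# this commit only registers "swefracas" (plus "argumentation_sent"), so
# "swediagnostics" and "swedn" are assumed to be wired up separately.
for config_name in ("swefracas", "swediagnostics", "swedn"):
    ds = load_dataset("sbx/superlim-2", config_name)
    print(config_name, ds)
```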

Files changed (1)
  1. superlim-2.py +78 -21
superlim-2.py CHANGED
@@ -108,24 +108,22 @@ The original dataset can be found here https://github.com/trtm/AURC. The test s
 _argumentation_sentences_DESCRIPTION_CITATION = """\
 """
 
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URL = "https://huggingface.co/datasets/sbx/superlim-2/raw/main/data/"
+_URL = "https://github.com/spraakbanken/SuperLim-dev/"
 _TASKS = {
-    "absabank": "ABSAbank-Imm",
-    "dalaj": "DaLAJ",
-    "swesim_relatedness": "SuperSim_relatedness",
-    "swesim_similarity": "SuperSim_similarity",
-    "sweana": "SweAnalogy",
-    "swefaq": "SweFAQ",
-    "swepar": "SweParaphrase",
-    "swesat": "SweSAT-synonyms",
-    "swewic": "SweWIC",
-    "argumentation_sent":"argumentation_sentences"
+    "absabank": "absabank-imm",
+    "argumentation_sent":"argumentation-sentences",
+    "dalaj": "dalag-ged-superlim",
+    "swesim_relatedness": "supersim-superlim/supersim-superlim-relatedness",
+    "swesim_similarity": "supersim-superlim/supersim-superlim-similarity",
+    "sweana": "sweanalogy",
+    "swefaq": "swefaq",
+    "swepar": "sweparaphrase",
+    "swesat": "swesat-synonyms",
+    "swewic": "swewic",
+    "swefracas": "swefracas",
+    "swediagnostics": "swediagnostics"
 }
 
-
 class SuperLimConfig(datasets.BuilderConfig):
     """BuilderConfig for SuperLim."""
 
@@ -171,7 +169,8 @@ class SuperLim(datasets.GeneratorBasedBuilder):
         datasets.BuilderConfig(name="swepar", version=VERSION, description=_SwePar_DESCRIPTION),
         datasets.BuilderConfig(name="swesat", version=VERSION, description=_SweSat_DESCRIPTION),
         datasets.BuilderConfig(name="swewic", version=VERSION, description=_SweWic_DESCRIPTION),
-        datasets.BuilderConfig(name="argumentation_sent", version=VERSION, description=_argumentation_sentences_DESCRIPTION)
+        datasets.BuilderConfig(name="argumentation_sent", version=VERSION, description=_argumentation_sentences_DESCRIPTION),
+        datasets.BuilderConfig(name="swefracas", version=VERSION, description=_argumentation_sentences_DESCRIPTION)
     ]
 
     def _info(self):
@@ -267,8 +266,7 @@ class SuperLim(datasets.GeneratorBasedBuilder):
                     "end_1": datasets.Value("string"),
                     "end_2": datasets.Value("string"),
                 }
-            )
-
+            )
         elif self.config.name == 'argumentation_sent':
             features = datasets.Features(
                 {
@@ -277,7 +275,39 @@ class SuperLim(datasets.GeneratorBasedBuilder):
                     "sentence": datasets.Value("string")
                 }
             )
-
+        elif self.config.name == 'swefracas':
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("id"),
+                    "original_id": datasets.Value("string"),
+                    "attribute": datasets.Value("string"),
+                    "value": datasets.Value("string")
+                }
+            )
+        elif self.config.name == 'swediagnostics':
+            features = datasets.Features(
+                {
+                    'lexical_semantics': datasets.Value("string"),
+                    'predicate_argument_structure': datasets.Value("string"),
+                    'logic': datasets.Value("string"),
+                    'knowledge': datasets.Value("string"),
+                    'domain': datasets.Value("string"),
+                    'premise': datasets.Value("string"),
+                    'hypothesis': datasets.Value("string"),
+                    'label':datasets.Value("string")
+                }
+            )
+
+        elif self.config.name == 'swedn':
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("id"),
+                    "headline": datasets.Value("string"),
+                    "summary": datasets.Value("string"),
+                    "article": datasets.Value("string"),
+                    "article_category": datasets.Value("string")
+                }
+            )
         else:
             raise ValueError(f"Subset {self.config.name} does not exist.")
         return datasets.DatasetInfo(
@@ -309,7 +339,7 @@ class SuperLim(datasets.GeneratorBasedBuilder):
                 },
             )
         splits.append(split_test)
-        if self.config.name in ("absabank", "dalaj", "swefaq", "swewic"):
+        if self.config.name in ("absabank", "dalaj", "swefaq", "swewic", "swedn"):
             data_dir_dev = dl_manager.download_and_extract(os.path.join(_URL,_TASKS[self.config.name],f"dev.{file_format}"))
             split_dev = datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
@@ -320,7 +350,7 @@ class SuperLim(datasets.GeneratorBasedBuilder):
                 },
             )
         splits.append(split_dev)
-        if self.config.name in ("absabank", "dalaj", "swefaq", "swewic", "argumentation_sent"):
+        if self.config.name in ("absabank", "dalaj", "swefaq", "swewic", "argumentation_sent", "swedn"):
             data_dir_train = dl_manager.download_and_extract(os.path.join(_URL,_TASKS[self.config.name],f"train.{file_format}"))
             split_train = datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -440,5 +470,32 @@ class SuperLim(datasets.GeneratorBasedBuilder):
                    "sentence" : row["sentence"],
                }
 
+            elif self.config.name == "swediagnostics":
+                yield key, {
+                    'lexical_semantics': row['lexical_semantics'],
+                    'predicate_argument_structure': row['predicate_argument_structure'],
+                    'logic': row['logic'],
+                    'knowledge': row['knowledge'],
+                    'domain': row['domain'],
+                    'premise': row['premise'],
+                    'hypothesis': row['hypothesis'],
+                    'label': row['label']
+                }
+
+            elif self.config.name == "swefracas":
+                yield key, {
+                    'original_id': row['original_id'],
+                    'attribute': row['attribute'],
+                    'value': row['value']
+                }
+
+            elif self.config.name == "swedn":
+                yield key, {
+                    'id': row['id'],
+                    'headline': row['headline'],
+                    'summary': row['summary'],
+                    'article': row['article'],
+                    'article_category': row['article_category']
+                }
         else:
             raise ValueError(f"Subset {self.config.name} does not exist")
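As a worked example of the URL change above, the sketch below mirrors the `os.path.join(...)` call used in `_split_generators`, now with the GitHub base; the `file_format` variable is defined outside this diff, so the `"tsv"` value here is only an assumption:

```python
import os

_URL = "https://github.com/spraakbanken/SuperLim-dev/"
_TASKS = {"swefaq": "swefaq", "swedn": "swedn"}  # excerpt of the new mapping

file_format = "tsv"  # assumed; the real value is set outside this diff
# Same construction as in _split_generators (POSIX path separator assumed):
test_url = os.path.join(_URL, _TASKS["swefaq"], f"test.{file_format}")
print(test_url)  # https://github.com/spraakbanken/SuperLim-dev/swefaq/test.tsv
```

`dl_manager.download_and_extract` is then called on that path for the test split, and on the corresponding `dev.*`/`train.*` paths for the subsets listed in the two `if self.config.name in (...)` checks.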