Annna committed
Commit 1cb95de
1 Parent(s): 0eb96f7

Update superlim-2.py

Files changed (1)
  1. superlim-2.py +11 -2
superlim-2.py CHANGED
@@ -101,6 +101,13 @@ The Swedish Word-in-Context dataset provides a benchmark for evaluating distribu
 Word-in-Context dataset, SweWiC consists of 1000 sentence pairs, where each sentence in a pair contains an occurence of a potentially ambiguous focus word specific to that pair. The question posed to the tested
 system is whether these two occurrences represent instances of the same word sense. There are 500 same-sense pairs and 500 different-sense pairs."""
 
+_argumentation_sentences_DESCRIPTION = """\
+Argumentation sentences is a translated corpus for the task of identifying stance in relation to a topic. It consists of sentences labeled with pro, con or non in relation to one of six topics.
+The original dataset can be found here https://github.com/trtm/AURC. The test set is manually corrected translations, the training set is machine translated. """
+
+_argumentation_sentences_DESCRIPTION_CITATION = """\
+"""
+
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
@@ -114,7 +121,8 @@ _TASKS = {
     "swefaq": "SweFAQ",
     "swepar": "SweParaphrase",
     "swesat": "SweSAT-synonyms",
-    "swewic": "SweWIC"
+    "swewic": "SweWIC",
+    "argumentation_sent": "argumentation_sentences"
 }
 
 
@@ -162,7 +170,8 @@ class SuperLim(datasets.GeneratorBasedBuilder):
         datasets.BuilderConfig(name="swefaq", version=VERSION, description=_SweFaq_DESCRIPTION),
         datasets.BuilderConfig(name="swepar", version=VERSION, description=_SwePar_DESCRIPTION),
         datasets.BuilderConfig(name="swesat", version=VERSION, description=_SweSat_DESCRIPTION),
-        datasets.BuilderConfig(name="swewic", version=VERSION, description=_SweWic_DESCRIPTION)
+        datasets.BuilderConfig(name="swewic", version=VERSION, description=_SweWic_DESCRIPTION),
+        datasets.BuilderConfig(name="argumentation_sent", version=VERSION, description=_argumentation_sentences_DESCRIPTION)
     ]
 
     def _info(self):
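
For reference, once this change is merged the new configuration could be loaded with the Hugging Face datasets library roughly as sketched below. The config name "argumentation_sent", the pro/con/non labels, and the machine-translated train / manually corrected test split come from the diff above; the Hub repository id is a placeholder and may differ.

from datasets import load_dataset

# Placeholder repo id for illustration only; replace with the actual Hub repo
# that hosts superlim-2.py.
REPO_ID = "namespace/superlim-2"

# Load the newly added argumentation-sentences configuration.
# (Recent versions of the datasets library may additionally require
# trust_remote_code=True for script-based datasets like this one.)
ds = load_dataset(REPO_ID, "argumentation_sent")

# Per the description, the training split is machine translated while the
# test split contains manually corrected translations, so the two splits
# are typically inspected separately.
print(ds)
print(ds["test"][0])  # expected to hold a sentence, its topic, and a pro/con/non label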