Robin Kurtz committed
Commit c9ac0a9
1 Parent(s): 9477631

processing script

Files changed (1)
  1. overlim.py +483 -0
overlim.py ADDED
@@ -0,0 +1,483 @@
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""The OverLim benchmark."""

import json
import os

import datasets

_CITATION = """\
"""

# You can copy an official description
_DESCRIPTION = """\
"""

_HOMEPAGE = ""

_LICENSE = ""

_GLUE_CITATION = """\
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
"""

_GLUE_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.

"""
_SST_DESCRIPTION = """\
The Stanford Sentiment Treebank consists of sentences from movie reviews and
human annotations of their sentiment. The task is to predict the sentiment of a
given sentence. We use the two-way (positive/negative) class split, and use only
sentence-level labels."""
_SST_CITATION = """\
@inproceedings{socher2013recursive,
  title={Recursive deep models for semantic compositionality over a sentiment treebank},
  author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
  booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
  pages={1631--1642},
  year={2013}
}"""
_MRPC_DESCRIPTION = """\
The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of
sentence pairs automatically extracted from online news sources, with human annotations
for whether the sentences in the pair are semantically equivalent."""
_MRPC_CITATION = """\
@inproceedings{dolan2005automatically,
  title={Automatically constructing a corpus of sentential paraphrases},
  author={Dolan, William B and Brockett, Chris},
  booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},
  year={2005}
}"""
_QQP_DESCRIPTION = """\
The Quora Question Pairs2 dataset is a collection of question pairs from the
community question-answering website Quora. The task is to determine whether a
pair of questions are semantically equivalent."""
_QQP_CITATION = """\
@online{WinNT,
  author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
  title = {First Quora Dataset Release: Question Pairs},
  year = {2017},
  url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
  urldate = {2019-04-03}
}"""
_STSB_DESCRIPTION = """\
The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
sentence pairs drawn from news headlines, video and image captions, and natural
language inference data. Each pair is human-annotated with a similarity score
from 1 to 5."""
_STSB_CITATION = """\
@article{cer2017semeval,
  title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},
  author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},
  journal={arXiv preprint arXiv:1708.00055},
  year={2017}
}"""
_MNLI_DESCRIPTION = """\
The Multi-Genre Natural Language Inference Corpus is a crowdsourced
collection of sentence pairs with textual entailment annotations. Given a premise sentence
and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
(entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
gathered from ten different sources, including transcribed speech, fiction, and government reports.
We use the standard test set, for which we obtained private labels from the authors, and evaluate
on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
the SNLI corpus as 550k examples of auxiliary training data."""
_MNLI_CITATION = """\
@InProceedings{N18-1101,
  author = "Williams, Adina
            and Nangia, Nikita
            and Bowman, Samuel",
  title = "A Broad-Coverage Challenge Corpus for
           Sentence Understanding through Inference",
  booktitle = "Proceedings of the 2018 Conference of
               the North American Chapter of the
               Association for Computational Linguistics:
               Human Language Technologies, Volume 1 (Long
               Papers)",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  pages = "1112--1122",
  location = "New Orleans, Louisiana",
  url = "http://aclweb.org/anthology/N18-1101"
}
@article{bowman2015large,
  title={A large annotated corpus for learning natural language inference},
  author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
  journal={arXiv preprint arXiv:1508.05326},
  year={2015}
}"""
_QNLI_DESCRIPTION = """\
The Stanford Question Answering Dataset is a question-answering
dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
convert the task into sentence pair classification by forming a pair between each question and each
sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
question and the context sentence. The task is to determine whether the context sentence contains
the answer to the question. This modified version of the original task removes the requirement that
the model select the exact answer, but also removes the simplifying assumptions that the answer
is always present in the input and that lexical overlap is a reliable cue."""
_QNLI_CITATION = """\
@article{rajpurkar2016squad,
  title={Squad: 100,000+ questions for machine comprehension of text},
  author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
  journal={arXiv preprint arXiv:1606.05250},
  year={2016}
}"""
_WNLI_DESCRIPTION = """\
The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
in which a system must read a sentence with a pronoun and select the referent of that pronoun from
a list of choices. The examples are manually constructed to foil simple statistical methods: Each
one is contingent on contextual information provided by a single word or phrase in the sentence.
To convert the problem into sentence pair classification, we construct sentence pairs by replacing
the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
new examples derived from fiction books that was shared privately by the authors of the original
corpus. While the included training set is balanced between two classes, the test set is imbalanced
between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
hypotheses are sometimes shared between training and development examples, so if a model memorizes the
training examples, it will predict the wrong label on the corresponding development set
example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
between a model's score on this task and its score on the unconverted original task. We
call the converted dataset WNLI (Winograd NLI)."""
_WNLI_CITATION = """\
@inproceedings{levesque2012winograd,
  title={The winograd schema challenge},
  author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
  booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
  year={2012}
}"""

_SUPER_GLUE_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}

Note that each SuperGLUE dataset has its own citation. Please see the source to
get the correct citation for each contained dataset.
"""

_SUPER_GLUE_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.

"""

_BOOLQ_DESCRIPTION = """\
BoolQ (Boolean Questions, Clark et al., 2019a) is a QA task where each example consists of a short
passage and a yes/no question about the passage. The questions are provided anonymously and
unsolicited by users of the Google search engine, and afterwards paired with a paragraph from a
Wikipedia article containing the answer. Following the original work, we evaluate with accuracy."""

_CB_DESCRIPTION = """\
The CommitmentBank (De Marneffe et al., 2019) is a corpus of short texts in which at least
one sentence contains an embedded clause. Each of these embedded clauses is annotated with the
degree to which we expect that the person who wrote the text is committed to the truth of the clause.
The resulting task is framed as three-class textual entailment on examples that are drawn from the Wall
Street Journal, fiction from the British National Corpus, and Switchboard. Each example consists
of a premise containing an embedded clause and the corresponding hypothesis is the extraction of
that clause. We use a subset of the data that had inter-annotator agreement above 0.85. The data is
imbalanced (relatively fewer neutral examples), so we evaluate using accuracy and F1, where for
multi-class F1 we compute the unweighted average of the F1 per class."""

_COPA_DESCRIPTION = """\
The Choice Of Plausible Alternatives (COPA, Roemmele et al., 2011) dataset is a causal
reasoning task in which a system is given a premise sentence and two possible alternatives. The
system must choose the alternative which has the more plausible causal relationship with the premise.
The method used for the construction of the alternatives ensures that the task requires causal reasoning
to solve. Examples either deal with alternative possible causes or alternative possible effects of the
premise sentence, accompanied by a simple question disambiguating between the two instance
types for the model. All examples are handcrafted and focus on topics from online blogs and a
photography-related encyclopedia. Following the recommendation of the authors, we evaluate using
accuracy."""

_RTE_DESCRIPTION = """\
The Recognizing Textual Entailment (RTE) datasets come from a series of annual competitions
on textual entailment, the problem of predicting whether a given premise sentence entails a given
hypothesis sentence (also known as natural language inference, NLI). RTE was previously included
in GLUE, and we use the same data and format as before: We merge data from RTE1 (Dagan
et al., 2006), RTE2 (Bar Haim et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli
et al., 2009). All datasets are combined and converted to two-class classification: entailment and
not_entailment. Of all the GLUE tasks, RTE was among those that benefited from transfer learning
the most, jumping from near random-chance performance (~56%) at the time of GLUE's launch to
85% accuracy (Liu et al., 2019c) at the time of writing. Given the eight point gap with respect to
human performance, however, the task is not yet solved by machines, and we expect the remaining
gap to be difficult to close."""

_BOOLQ_CITATION = """\
@inproceedings{clark2019boolq,
  title={BoolQ: Exploring the Surprising Difficulty of Natural Yes/No Questions},
  author={Clark, Christopher and Lee, Kenton and Chang, Ming-Wei, and Kwiatkowski, Tom and Collins, Michael, and Toutanova, Kristina},
  booktitle={NAACL},
  year={2019}
}"""

_CB_CITATION = """\
@article{de marneff_simons_tonhauser_2019,
  title={The CommitmentBank: Investigating projection in naturally occurring discourse},
  journal={proceedings of Sinn und Bedeutung 23},
  author={De Marneff, Marie-Catherine and Simons, Mandy and Tonhauser, Judith},
  year={2019}
}"""

_COPA_CITATION = """\
@inproceedings{roemmele2011choice,
  title={Choice of plausible alternatives: An evaluation of commonsense causal reasoning},
  author={Roemmele, Melissa and Bejan, Cosmin Adrian and Gordon, Andrew S},
  booktitle={2011 AAAI Spring Symposium Series},
  year={2011}
}"""

_RTE_CITATION = """\
@inproceedings{dagan2005pascal,
  title={The PASCAL recognising textual entailment challenge},
  author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
  booktitle={Machine Learning Challenges Workshop},
  pages={177--190},
  year={2005},
  organization={Springer}
}
@inproceedings{bar2006second,
  title={The second pascal recognising textual entailment challenge},
  author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
  booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
  volume={6},
  number={1},
  pages={6--4},
  year={2006},
  organization={Venice}
}
@inproceedings{giampiccolo2007third,
  title={The third pascal recognizing textual entailment challenge},
  author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
  booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
  pages={1--9},
  year={2007},
  organization={Association for Computational Linguistics}
}
@inproceedings{bentivogli2009fifth,
  title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
  author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
  booktitle={TAC},
  year={2009}
}"""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URL = "https://huggingface.co/datasets/KBLab/overlim/resolve/main/data/"
_TASKS = {
    "boolq": "boolq.tar.gz",
    "cb": "cb.tar.gz",
    "copa": "copa.tar.gz",
    "mnli": "mnli.tar.gz",
    "mrpc": "mrpc.tar.gz",
    "qnli": "qnli.tar.gz",
    "qqp": "qqp.tar.gz",
    "rte": "rte.tar.gz",
    "sst": "sst.tar.gz",
    "stsb": "stsb.tar.gz",
    "wnli": "wnli.tar.gz"
}
_LANGUAGES = {"sv", "da", "nb"}

class OverLimConfig(datasets.BuilderConfig):
    """BuilderConfig for OverLim."""

    def __init__(self, name, description, features, citation, language, label_classes=("False", "True"), **kwargs):
        """BuilderConfig for a single OverLim task in a given language."""
        self.full_name = name + "_" + language
        super(OverLimConfig, self).__init__(
            name=self.full_name, version=datasets.Version("1.0.2"), **kwargs
        )
        self.features = features
        self.label_classes = label_classes
        self.citation = citation
        self.description = description
        # self.name = name
        self.language = language


class OverLim(datasets.GeneratorBasedBuilder):
    """OverLim"""

    BUILDER_CONFIGS = [
        [
            OverLimConfig(
                name="boolq",
                description=_BOOLQ_DESCRIPTION,
                features=["question", "passage"],
                label_classes=["False", "True"],
                citation=_BOOLQ_CITATION,
                language=lang,
            ),
            OverLimConfig(
                name="cb",
                description=_CB_DESCRIPTION,
                features=["premise", "hypothesis"],
                label_classes=["entailment", "contradiction", "neutral"],
                citation=_CB_CITATION,
                language=lang,
            ),
            OverLimConfig(
                name="copa",
                description=_COPA_DESCRIPTION,
                label_classes=["choice1", "choice2"],
                # Note that question will only be the X in the statement "What's
                # the X for this?".
                features=["premise", "choice1", "choice2", "question"],
                citation=_COPA_CITATION,
                language=lang,
            ),
            OverLimConfig(
                name="rte",
                description=_RTE_DESCRIPTION,
                features=["premise", "hypothesis"],
                label_classes=["entailment", "not_entailment"],
                citation=_RTE_CITATION,
                language=lang,
            ),
            OverLimConfig(
                name="qqp",
                description=_QQP_DESCRIPTION,
                features=["text_a", "text_b"],
                label_classes=["not_duplicate", "duplicate"],
                citation=_QQP_CITATION,
                language=lang,
            ),
            OverLimConfig(
                name="qnli",
                description=_QNLI_DESCRIPTION,
                features=["premise", "hypothesis"],
                label_classes=["entailment", "not_entailment"],
                citation=_QNLI_CITATION,
                language=lang,
            ),
            OverLimConfig(
                name="stsb",
                description=_STSB_DESCRIPTION,
                features=["text_a", "text_b"],
                citation=_STSB_CITATION,
                language=lang,
            ),
            OverLimConfig(
                name="mnli",
                description=_MNLI_DESCRIPTION,
                features=["premise", "hypothesis"],
                label_classes=["entailment", "neutral", "contradiction"],
                citation=_MNLI_CITATION,
                language=lang,
            ),
            OverLimConfig(
                name="mrpc",
                description=_MRPC_DESCRIPTION,
                features=["text_a", "text_b"],
                label_classes=["not_equivalent", "equivalent"],
                citation=_MRPC_CITATION,
                language=lang,
            ),
            OverLimConfig(
                name="wnli",
                description=_WNLI_DESCRIPTION,
                features=["premise", "hypothesis"],
                label_classes=["not_entailment", "entailment"],
                citation=_WNLI_CITATION,
                language=lang,
            ),
            OverLimConfig(
                name="sst",
                description=_SST_DESCRIPTION,
                features=["text"],
                label_classes=["negative", "positive"],
                citation=_SST_CITATION,
                language=lang,
            ),
        ]
        for lang in _LANGUAGES
    ]
    # One config is generated per task and language; flatten the nested lists.
    BUILDER_CONFIGS = [element for inner in BUILDER_CONFIGS for element in inner]

    def _info(self):
        features = {feature: datasets.Value("string") for feature in self.config.features}
        features["idx"] = datasets.Value("int32")
        # Declare the label column so that the examples yielded in _generate_examples match
        # the schema; stsb is a regression task, the rest use the configured class labels.
        if self.config.name.startswith("stsb"):
            features["label"] = datasets.Value("float32")
        else:
            features["label"] = datasets.features.ClassLabel(names=list(self.config.label_classes))

        return datasets.DatasetInfo(
            description=_GLUE_DESCRIPTION + self.config.description,
            features=datasets.Features(features),
            homepage=_HOMEPAGE,
            citation=self.config.citation + "\n" + _SUPER_GLUE_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Config names are "<task>_<language>"; strip the language suffix to look up the
        # task archive, which is assumed to extract into a folder named after the task.
        task_name = self.config.name.rsplit("_", 1)[0]
        dl_dir = dl_manager.download_and_extract(_URL + self.config.language + "/" + _TASKS[task_name])
        dl_dir = os.path.join(dl_dir, task_name)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "train.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "val.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "test.jsonl"),
                },
            ),
        ]

    def _generate_examples(self, data_file):
        with open(data_file, encoding="utf-8") as f:
            for line in f:
                row = json.loads(line)
                example = {feature: row[feature] for feature in self.config.features}
                example["idx"] = row["idx"]

                # Config names carry a language suffix (e.g. "copa_sv"), so match on the prefix.
                if self.config.name.startswith("copa"):
                    example["label"] = "choice2" if row["label"] else "choice1"
                else:
                    example["label"] = _cast_label(row["label"])
                yield example["idx"], example


def _cast_label(label):
    """Converts the label into the appropriate string version."""
    if isinstance(label, str):
        return label
    elif isinstance(label, bool):
        return "True" if label else "False"
    # Integer class indices and float stsb scores are passed through unchanged.
    return label
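
Once the task archives are available under the data/ path referenced by _URL, every task-and-language pair defined above becomes a loadable configuration named "<task>_<language>". A minimal usage sketch, assuming the script is hosted in the KBLab/overlim dataset repository and the Swedish SST archive exists:

from datasets import load_dataset

# "sst_sv" is one of the configs built above: task "sst", language "sv".
sst_sv = load_dataset("KBLab/overlim", "sst_sv")

# Each split exposes the declared text features plus "idx" and "label".
print(sst_sv["train"][0])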