glecorve committed
Commit be41585 · 1 Parent(s): ebf97bc

Removed loading script

Files changed (1)
  1. lc_quad2-sparqltotext.py +0 -127
lc_quad2-sparqltotext.py DELETED
@@ -1,127 +0,0 @@
-import os
-import zipfile
-import json
-import base64
-
-import datasets
-
-try:
-    import gitlab
-except ImportError:
-    print("ERROR: To be able to retrieve this dataset you need to install the `python-gitlab` package")
-
-_CITATION = """\
-@inproceedings{lecorve2022sparql2text,
-  title={Coqar: Question rewriting on coqa},
-  author={Lecorv\'e, Gw\'enol\'e and Veyret, Morgan and Brabant, Quentin and Rojas-Barahona, Lina M.},
-  journal={Proceedings of the Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (AACL-IJCNLP)},
-  year={2022}
-}
-"""
-
-_HOMEPAGE = ""
-
-_URLS = {
-    "train": "json/train.json",
-    "valid": "json/valid.json",
-    "test": "json/test.json"
-}
-
-_DESCRIPTION = """\
-Special version of LCQuAD-2.0 for the SPARQL-to-Text task
-"""
-
-
-class LCQuAD20_SPARQL2Text(datasets.GeneratorBasedBuilder):
-    """
-    LCQuAD_2.0-SPARQL2Text: Special version of LCQuAD-2.0 for the SPARQL-to-Text task
-    """
-
-    VERSION = datasets.Version("1.0.0")
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "uid": datasets.Value('int32'),
-                    "NNQT_question": datasets.Value('string'),
-                    "paraphrased_question": datasets.Value('string'),
-                    "question": datasets.Value('string'),
-                    "simplified_query": datasets.Value('string'),
-                    "sparql_dbpedia18": datasets.Value('string'),
-                    "sparql_wikidata": datasets.Value('string'),
-                    "answer": [datasets.Value("string")],
-                    "solved_answer": [datasets.Value("string")],
-                    "subgraph": datasets.Value('string'),
-                    "template": datasets.Value('string'),
-                    "template_id": datasets.Value('string'),
-                    "template_index": datasets.Value('int32')
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset
-            supervised_keys=("simplified_query", "question"),
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        paths = dl_manager.download_and_extract(_URLS)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": dl_manager.extract(paths['train']),
-                            "split": "train"}
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={"filepath": dl_manager.extract(paths['valid']),
-                            "split": "valid"}
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"filepath": dl_manager.extract(paths['test']),
-                            "split": "test"}
-            )
-        ]
-
-
-    def _generate_examples(self, filepath, split):
-        """Yields examples."""
-
-        def transform_sample(original_sample):
-            transformed_sample = {
-                "uid": -1,
-                "NNQT_question": "",
-                "paraphrased_question": "",
-                "question": "",
-                "simplified_query": "",
-                "sparql_dbpedia18": "",
-                "sparql_wikidata": "",
-                "answer": [],
-                "solved_answer": [],
-                "subgraph": "",
-                "template": "",
-                "template_id": "",
-                "template_index": -1
-            }
-            transformed_sample.update(original_sample)
-
-            return transformed_sample
-
-        # Yields (key, example) tuples from the dataset
-        with open(filepath,'r') as f:
-            data = json.load(f)
-            key = 0
-            for it in data:
-                yield key, transform_sample(it)
-                key += 1
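
With the loading script removed, the splits can presumably be loaded straight from the JSON files the script pointed at. A minimal sketch using the generic "json" builder of the datasets library; the paths mirror the _URLS dict from the deleted script and are an assumption about where the json/ directory sits (e.g. a local clone of this repo):

    from datasets import load_dataset

    # Paths mirror _URLS in the deleted script; adjust to wherever the
    # json/ directory actually lives (assumption: a local clone of this repo).
    ds = load_dataset(
        "json",
        data_files={
            "train": "json/train.json",
            "validation": "json/valid.json",
            "test": "json/test.json",
        },
    )

    # Field names follow the features the deleted script declared,
    # e.g. "question" and "simplified_query".
    print(ds["train"][0]["question"])

One difference from the deleted script: the generic builder does not apply the defaulting that transform_sample performed, so fields missing from a JSON record may come through as null rather than the empty values ("" or []) the script used to fill in.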