# lc_quad2-sparqltotext.py
import json

import datasets
_CITATION = """\
@inproceedings{lecorve2022sparql2text,
    title={SPARQL-to-Text Question Generation for Knowledge-Based Conversational Applications},
    author={Lecorv\'e, Gw\'enol\'e and Veyret, Morgan and Brabant, Quentin and Rojas-Barahona, Lina M.},
    booktitle={Proceedings of the Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (AACL-IJCNLP)},
    year={2022}
}
"""
_HOMEPAGE = ""
_URLS = {
    "train": "json/train.json",
    "valid": "json/valid.json",
    "test": "json/test.json",
}
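# The split files above are referenced relative to this loading script, so the
# download manager resolves them against the dataset repository itself.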
_DESCRIPTION = """\
Special version of LCQuAD-2.0 for the SPARQL-to-Text task
"""
class LCQuAD20_SPARQL2Text(datasets.GeneratorBasedBuilder):
    """
    LCQuAD_2.0-SPARQL2Text: Special version of LCQuAD-2.0 for the SPARQL-to-Text task
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # Schema of a single example.
            features=datasets.Features(
                {
                    "uid": datasets.Value("int32"),
                    "NNQT_question": datasets.Value("string"),
                    "paraphrased_question": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "simplified_query": datasets.Value("string"),
                    "sparql_dbpedia18": datasets.Value("string"),
                    "sparql_wikidata": datasets.Value("string"),
                    "answer": [datasets.Value("string")],
                    "solved_answer": [datasets.Value("string")],
                    "subgraph": datasets.Value("string"),
                    "template": datasets.Value("string"),
                    "template_id": datasets.Value("string"),
                    "template_index": datasets.Value("int32"),
                }
            ),
            # If there's a common (input, target) tuple among the features,
            # specify it here. It is used when as_supervised=True is passed
            # to builder.as_dataset.
            supervised_keys=("simplified_query", "question"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
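
    # With supervised_keys set as above, the (input, target) pair for this
    # dataset is (simplified SPARQL query, natural-language question), i.e.
    # the SPARQL-to-Text direction of the task.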
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager is a datasets.download.DownloadManager used to download
        # and extract the URLs; the split files are plain JSON, so the
        # returned paths can be passed to the generator directly.
        paths = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": paths["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": paths["valid"], "split": "valid"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": paths["test"], "split": "test"},
            ),
        ]
    def _generate_examples(self, filepath, split):
        """Yields examples."""

        def transform_sample(original_sample):
            transformed_sample = {
                "uid": -1,
                "NNQT_question": "",
                "paraphrased_question": "",
                "question": "",
                "simplified_query": "",
                "sparql_dbpedia18": "",
                "sparql_wikidata": "",
                "answer": [],
                "solved_answer": [],
                "subgraph": "",
                "template": "",
                "template_id": "",
                "template_index": -1,
            }
            transformed_sample.update(original_sample)
            return transformed_sample
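
        # E.g., a hypothetical partial record such as
        #   transform_sample({"uid": 7, "question": "Who wrote Dune?"})
        # comes back as the full 13-field dict, with every missing field set
        # to its neutral default ("", [] or -1), so every yielded row matches
        # the schema declared in _info().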
        # Yields (key, example) tuples from the dataset
        with open(filepath, "r", encoding="utf-8") as f:
            data = json.load(f)
        for key, it in enumerate(data):
            yield key, transform_sample(it)
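

# ---------------------------------------------------------------------------
# Usage sketch (assumptions: the script is run next to its json/ folder, and
# recent versions of `datasets` may additionally require
# trust_remote_code=True to execute a loading script).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    ds = datasets.load_dataset("lc_quad2-sparqltotext.py")
    sample = ds["train"][0]
    print("SPARQL  :", sample["simplified_query"])
    print("Question:", sample["question"])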