"""AfriQA GOLD Passages dataset."""


import json

import datasets


_HOMEPAGE = "https://github.com/masakhane-io/afriqa"

_DESCRIPTION = """\
AfriQA: Cross-lingual Open-Retrieval Question Answering for African Languages
AfriQA is the first cross-lingual question-answering (QA) dataset with a focus on African languages. 
The dataset includes over 12,000 XOR QA examples across 10 African languages, making it an invaluable resource for developing more equitable QA technology.
"""

_CITATION = """\
"""

_URL = "https://github.com/masakhane-io/afriqa/raw/main/data/gold_passages/"

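# High-resource pivot language used for each language's translated questions and gold answer spans.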
_LANG_2_PIVOT = {
    "bem": "en",
    "fon": "fr",
    "hau": "en",
    "ibo": "en",
    "kin": "en",
    "swa": "en",
    "twi": "en",
    "yor": "en",
    "zul": "en",
}

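# Gold-passage splits released per language; not every language ships all of train/dev/test.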
_LANG_2_SPLITS = {
    "bem": ["train", "dev", "test"],
    "fon": ["train", "dev", "test"],
    "hau": ["train", "dev", "test"],
    "ibo": ["train", "dev", "test"],
    "kin": ["train", "dev", "test"],
    "swa": ["test"],
    "twi": ["train", "dev", "test"],
    "yor": ["train", "test"],
    "zul": ["train", "dev", "test"],

}

class AfriQAConfig(datasets.BuilderConfig):
    """BuilderConfig for AfriQA"""

    def __init__(self, **kwargs):
        """BuilderConfig for AfriQA.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(AfriQAConfig, self).__init__(**kwargs)


class AfriQA(datasets.GeneratorBasedBuilder):
    """AfriQA dataset."""

    VERSION = datasets.Version("1.0.0")

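    # One BuilderConfig per language, keyed by its ISO 639-3 code.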
    BUILDER_CONFIGS = [
        AfriQAConfig(name="bem", version=datasets.Version("1.0.0"), description="AfriQA Gold Passages Bemba dataset"),
        AfriQAConfig(name="fon", version=datasets.Version("1.0.0"), description="AfriQA Gold Passages Fon dataset"),
        AfriQAConfig(name="hau", version=datasets.Version("1.0.0"), description="AfriQA Gold Passages Hausa dataset"),
        AfriQAConfig(name="ibo", version=datasets.Version("1.0.0"), description="AfriQA Gold Passages Igbo dataset"),
        AfriQAConfig(name="kin", version=datasets.Version("1.0.0"), description="AfriQA Gold Passages Kinyarwanda dataset"),
        AfriQAConfig(name="swa", version=datasets.Version("1.0.0"), description="AfriQA Gold Passages Swahili dataset"),
        AfriQAConfig(name="twi", version=datasets.Version("1.0.0"), description="AfriQA Gold Passages Twi dataset"),
        AfriQAConfig(name="wol", version=datasets.Version("1.0.0"), description="AfriQA Gold Passages Wolof dataset"),
        AfriQAConfig(name="yor", version=datasets.Version("1.0.0"), description="AfriQA Gold Passages Yoruba dataset"),
        AfriQAConfig(name="zul", version=datasets.Version("1.0.0"), description="AfriQA Gold Passages Zulu dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "question_lang": datasets.Value("string"),
                    "question_translated": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "answer_pivot": datasets.Value("string"),
                    "answer_start": datasets.Value("string"),
                    "answer_lang": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

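        # One JSON file per split: <_URL><lang>/gold_span_passages.afriqa.<lang>.<pivot>.<split>.json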
        urls_to_download = {}

        for split in _LANG_2_SPLITS[self.config.name]:
            urls_to_download[split] = f"{_URL}{self.config.name}/gold_span_passages.afriqa.{self.config.name}.{_LANG_2_PIVOT[self.config.name]}.{split}.json"

        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        splits_list = []

        for split in _LANG_2_SPLITS[self.config.name]:
            if split == "train":
                splits_list.append(datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}))
            elif split == "dev":
                splits_list.append(datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}))
            elif split == "test":
                splits_list.append(datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}))

        return splits_list

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8-sig") as f:
            for row in f:
                example = json.loads(row)
                _id = example["id"]

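                # Skip examples with no gold passage or no annotated answer span.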
                if not example["context"] or not example["answer_pivot"]["answer_start"]:
                    continue

                yield _id, {
                    "question_lang": example["question_lang"],
                    "question_translated": example["question_translated"],
                    "context": example["context"],
                    "title": example["title"],
                    "answer_pivot": example["answer_pivot"]["text"][0],
                    "answer_start": example["answer_pivot"]["answer_start"][0],
                    "answer_lang": example["answer_lang"],
                }
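
# Usage sketch (assumptions: the script is saved locally as "afriqa.py" and the installed
# `datasets` version still supports script-based loading; config name and split are illustrative):
#
#   import datasets
#   ds = datasets.load_dataset("afriqa.py", "hau", split="train")
#   print(ds[0]["question_lang"], "->", ds[0]["answer_pivot"])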