holylovenia committed on
Commit e1ea39e
1 Parent(s): ffe75db

Upload tico_19.py with huggingface_hub

Files changed (1):
  1. tico_19.py +71 -83
tico_19.py CHANGED
@@ -14,18 +14,17 @@
 # limitations under the License.
 
 import csv
-from fnmatch import translate
 import os
 import re
 from pathlib import Path
 from typing import Dict, List, Tuple
-from translate.storage.tmx import tmxfile
 
 import datasets
+from translate.storage.tmx import tmxfile
 
-from nusacrowd.utils import schemas
-from nusacrowd.utils.configs import NusantaraConfig
-from nusacrowd.utils.constants import Tasks
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Tasks
 
 _CITATION = """\
 @inproceedings{anastasopoulos-etal-2020-tico,
@@ -59,34 +58,50 @@ _CITATION = """\
 """
 
 # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
-_LANGUAGES = ["ind", "ara", "spa", "fra", "hin", "por", "rus", "zho", "eng"]
+_LANGUAGES = ["ind", "ara", "spa", "fra", "hin", "por", "rus", "zho", "eng", "khm", "zlm", "mya", "tgl", "tam"]
 _LOCAL = False
 _SUPPORTED_LANG_PAIRS = [
-    ("ind", "ara"), ("ind", "spa"), ("ind", "fra"), ("ind", "hin"), ("ind", "por"), ("ind", "rus"), ("ind", "zho"), ("ind", "eng"),
-    ("ara", "ind"), ("spa", "ind"), ("fra", "ind"), ("hin", "ind"), ("por", "ind"), ("rus", "ind"), ("zho", "ind"), ("eng", "ind")
+    ("ind", "ara"),
+    ("ind", "spa"),
+    ("ind", "fra"),
+    ("ind", "hin"),
+    ("ind", "por"),
+    ("ind", "rus"),
+    ("ind", "zho"),
+    ("ind", "eng"),
+    ("ara", "ind"),
+    ("spa", "ind"),
+    ("fra", "ind"),
+    ("hin", "ind"),
+    ("por", "ind"),
+    ("rus", "ind"),
+    ("zho", "ind"),
+    ("eng", "ind"),
+    ("khm", "eng"),
+    ("eng", "khm"),
+    ("mya", "eng"),
+    ("eng", "mya"),
+    ("zlm", "eng"),
+    ("eng", "zlm"),
+    ("tgl", "eng"),
+    ("eng", "tgl"),
+    ("tam", "eng"),
+    ("eng", "tam"),
 ]
 
-_LANG_CODE_MAP = {
-    "ind": "id",
-    "ara": "ar",
-    "spa": "es-LA",
-    "fra": "fr",
-    "hin": "hi",
-    "por": "pt-BR",
-    "rus": "ru",
-    "zho": "zh",
-    "eng": "en"
-}
+_LANG_CODE_MAP = {"ind": "id", "ara": "ar", "spa": "es-LA", "fra": "fr", "hin": "hi", "por": "pt-BR", "rus": "ru", "zho": "zh", "eng": "en", "khm": "km", "zlm": "ms", "mya": "my", "tgl": "tl", "tam": "ta"}
+
+_DEVTEST_LANG_PAIRS = [_LANG_CODE_MAP[source_lang] + "-" + _LANG_CODE_MAP[target_lang] for (source_lang, target_lang) in _SUPPORTED_LANG_PAIRS if (source_lang == "eng" or target_lang == "eng")]
 
 _DATASETNAME = "tico_19"
 
 _DESCRIPTION = """\
-TICO-19 (Translation Initiative for COVID-19) is sampled from a variety of public sources containing
-COVID-19 related content, representing different domains (e.g., news, wiki articles, and others). TICO-19
-includes 30 documents (3071 sentences, 69.7k words) translated from English into 36 languages: Amharic,
-Arabic (Modern Standard), Bengali, Chinese (Simplified), Dari, Dinka, Farsi, French (European), Hausa,
-Hindi, Indonesian, Kanuri, Khmer (Central), Kinyarwanda, Kurdish Kurmanji, Kurdish Sorani, Lingala,
-Luganda, Malay, Marathi, Myanmar, Nepali, Nigerian Fulfulde, Nuer, Oromo, Pashto, Portuguese (Brazilian),
+TICO-19 (Translation Initiative for COVID-19) is sampled from a variety of public sources containing
+COVID-19 related content, representing different domains (e.g., news, wiki articles, and others). TICO-19
+includes 30 documents (3071 sentences, 69.7k words) translated from English into 36 languages: Amharic,
+Arabic (Modern Standard), Bengali, Chinese (Simplified), Dari, Dinka, Farsi, French (European), Hausa,
+Hindi, Indonesian, Kanuri, Khmer (Central), Kinyarwanda, Kurdish Kurmanji, Kurdish Sorani, Lingala,
+Luganda, Malay, Marathi, Myanmar, Nepali, Nigerian Fulfulde, Nuer, Oromo, Pashto, Portuguese (Brazilian),
 Russian, Somali, Spanish (Latin American), Swahili, Congolese Swahili, Tagalog, Tamil, Tigrinya, Urdu, Zulu.
 """
 
@@ -94,25 +109,22 @@ _HOMEPAGE = "https://tico-19.github.io"
 
 _LICENSE = "CC0"
 
-_URLS = {
-    "evaluation": "https://tico-19.github.io/data/tico19-testset.zip",
-    "all": "https://tico-19.github.io/data/TM/all.{lang_pairs}.tmx.zip"
-}
+_URLS = {"evaluation": "https://tico-19.github.io/data/tico19-testset.zip", "all": "https://tico-19.github.io/data/TM/all.{lang_pairs}.tmx.zip"}
 
 _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
 
 _SOURCE_VERSION = "1.0.0"
 
-_NUSANTARA_VERSION = "1.0.0"
+_SEACROWD_VERSION = "2024.06.20"
 
 
-def nusantara_config_constructor(lang_source, lang_target, schema, version):
-    """Construct NusantaraConfig with tico_19_{lang_source}_{lang_target}_{schema} as the name format"""
-    if schema != "source" and schema != "nusantara_t2t":
+def seacrowd_config_constructor(lang_source, lang_target, schema, version):
+    """Construct SEACrowdConfig with tico_19_{lang_source}_{lang_target}_{schema} as the name format"""
+    if schema != "source" and schema != "seacrowd_t2t":
         raise ValueError(f"Invalid schema: {schema}")
 
     if lang_source == "" and lang_target == "":
-        return NusantaraConfig(
+        return SEACrowdConfig(
             name="tico_19_{schema}".format(schema=schema),
             version=datasets.Version(version),
             description="tico_19 {schema} schema for default language pair (eng-ind)".format(schema=schema),
@@ -120,7 +132,7 @@ def nusantara_config_constructor(lang_source, lang_target, schema, version):
             subset_id="tico_19",
         )
     else:
-        return NusantaraConfig(
+        return SEACrowdConfig(
            name="tico_19_{src}_{tgt}_{schema}".format(src=lang_source, tgt=lang_target, schema=schema),
             version=datasets.Version(version),
             description="tico_19 {schema} schema for {src}-{tgt} language pair".format(src=lang_source, tgt=lang_target, schema=schema),
@@ -128,16 +140,14 @@ def nusantara_config_constructor(lang_source, lang_target, schema, version):
             subset_id="tico_19",
         )
 
+
 class Tico19(datasets.GeneratorBasedBuilder):
     """TICO-19 is MT dataset sampled from a variety of public sources containing COVID-19 related content"""
 
     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
 
-    BUILDER_CONFIGS = [
-        nusantara_config_constructor(src, tgt, schema, version)
-        for src, tgt in [("", "")] + _SUPPORTED_LANG_PAIRS for schema, version in zip(["source", "nusantara_t2t"], [_SOURCE_VERSION, _NUSANTARA_VERSION])
-    ]
+    BUILDER_CONFIGS = [seacrowd_config_constructor(src, tgt, schema, version) for src, tgt in [("", "")] + _SUPPORTED_LANG_PAIRS for schema, version in zip(["source", "seacrowd_t2t"], [_SOURCE_VERSION, _SEACROWD_VERSION])]
 
     DEFAULT_CONFIG_NAME = "tico_19_source"
 
@@ -155,7 +165,7 @@ class Tico19(datasets.GeneratorBasedBuilder):
                     "translatorId": datasets.Value("string"),
                 }
             )
-        elif self.config.schema == "nusantara_t2t":
+        elif self.config.schema == "seacrowd_t2t":
             features = schemas.text2text_features
 
         return datasets.DatasetInfo(
@@ -168,34 +178,28 @@ class Tico19(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
         """Returns SplitGenerators."""
-
+
         try:
-            lang_pairs_config = re.search("tico_19_(.+?)_(source|nusantara_t2t)", self.config.name).group(1)
+            lang_pairs_config = re.search("tico_19_(.+?)_(source|seacrowd_t2t)", self.config.name).group(1)
             lang_src, lang_tgt = lang_pairs_config.split("_")
         except AttributeError:
             lang_src, lang_tgt = "eng", "ind"
 
         lang_pairs = _LANG_CODE_MAP[lang_src] + "-" + _LANG_CODE_MAP[lang_tgt]
 
-        # dev & test split only applicable to eng-ind language pair
-        if lang_pairs in ["en-id", "id-en"]:
+        # dev & test split only applicable to eng-[sea language] language pair
+        if lang_pairs in set(_DEVTEST_LANG_PAIRS):
+            lang_sea = _LANG_CODE_MAP[lang_tgt] if lang_src == "eng" else _LANG_CODE_MAP[lang_src]
+
             data_dir = dl_manager.download_and_extract(_URLS["evaluation"])
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
-                    gen_kwargs={
-                        "filepath": os.path.join(data_dir, "tico19-testset", "test", f"test.en-id.tsv"),
-                        "lang_source": lang_src,
-                        "lang_target": lang_tgt
-                    },
+                    gen_kwargs={"filepath": os.path.join(data_dir, "tico19-testset", "test", f"test.en-{lang_sea}.tsv"), "lang_source": lang_src, "lang_target": lang_tgt},
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
-                    gen_kwargs={
-                        "filepath": os.path.join(data_dir, "tico19-testset", "dev", f"dev.en-id.tsv"),
-                        "lang_source": lang_src,
-                        "lang_target": lang_tgt
-                    },
+                    gen_kwargs={"filepath": os.path.join(data_dir, "tico19-testset", "dev", f"dev.en-{lang_sea}.tsv"), "lang_source": lang_src, "lang_target": lang_tgt},
                 ),
             ]
         else:
@@ -203,20 +207,16 @@
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
-                    gen_kwargs={
-                        "filepath": os.path.join(data_dir, f"all.{lang_pairs}.tmx"),
-                        "lang_source": lang_src,
-                        "lang_target": lang_tgt
-                    },
+                    gen_kwargs={"filepath": os.path.join(data_dir, f"all.{lang_pairs}.tmx"), "lang_source": lang_src, "lang_target": lang_tgt},
                 )
             ]
 
     def _generate_examples(self, filepath: Path, lang_source: str, lang_target: str) -> Tuple[int, Dict]:
         """Yields examples as (key, example) tuples."""
-
+
         if self.config.schema == "source":
-            # eng-ind language pair dataset provided in .tsv format
-            if (lang_source == "eng" and lang_target == "ind") or (lang_source == "ind" and lang_target == "eng"):
+            # eng-[sea language] language pair dataset provided in .tsv format
+            if f"{_LANG_CODE_MAP[lang_source]}-{_LANG_CODE_MAP[lang_target]}" in set(_DEVTEST_LANG_PAIRS):
                 with open(filepath, encoding="utf-8") as f:
                     reader = csv.reader(f, delimiter="\t", quotechar='"')
                     for id_, row in enumerate(reader):
@@ -242,7 +242,7 @@
                             "license": row[6],
                            "translatorId": row[7],
                         }
-
+
            # all language pairs except eng-ind dataset provided in .tmx format
             else:
                 with open(filepath, "rb") as f:
@@ -250,8 +250,8 @@
 
                     for id_, node in enumerate(tmx_file.unit_iter()):
                         try:
-                            url = [text for text in node.xmlelement.itertext('prop')][0]
-                        except:
+                            url = [text for text in node.xmlelement.itertext("prop")][0]
+                        except Exception:
                             url = ""
                         yield id_, {
                             "sourceLang": _LANG_CODE_MAP[lang_source],
@@ -264,8 +264,8 @@
                             "translatorId": "",
                         }
 
-        elif self.config.schema == "nusantara_t2t":
-            if (lang_source == "eng" and lang_target == "ind") or (lang_source == "ind" and lang_target == "eng"):
+        elif self.config.schema == "seacrowd_t2t":
+            if f"{_LANG_CODE_MAP[lang_source]}-{_LANG_CODE_MAP[lang_target]}" in set(_DEVTEST_LANG_PAIRS):
                 with open(filepath, encoding="utf-8") as f:
                     reader = csv.reader(f, delimiter="\t", quotechar='"')
                     for id_, row in enumerate(reader):
@@ -277,22 +277,10 @@
                         else:
                             source_string = row[3]
                             target_string = row[2]
-                        yield id_, {
-                            "id": row[4],
-                            "text_1": source_string,
-                            "text_2": target_string,
-                            "text_1_name": lang_source,
-                            "text_2_name": lang_target
-                        }
+                        yield id_, {"id": row[4], "text_1": source_string, "text_2": target_string, "text_1_name": lang_source, "text_2_name": lang_target}
             else:
                 with open(filepath, "rb") as f:
                     tmx_file = tmxfile(f)
-
+
                     for id_, node in enumerate(tmx_file.unit_iter()):
-                        yield id_, {
-                            "id": node.getid(),
-                            "text_1": node.source,
-                            "text_2": node.target,
-                            "text_1_name": lang_source,
-                            "text_2_name": lang_target
-                        }
+                        yield id_, {"id": node.getid(), "text_1": node.source, "text_2": node.target, "text_1_name": lang_source, "text_2_name": lang_target}