Datasets:
Tom Aarsen
committed on
Commit
•
fbbc6ca
1
Parent(s):
f272d85
Add 'document_id' and 'sentence_id' columns
Browse files
- README.md +9 -4
- conll2002.py +20 -7
README.md
CHANGED
@@ -230,10 +230,13 @@ There are two languages available : Spanish (es) and Dutch (nl).
|
|
230 |
The examples look like this :
|
231 |
|
232 |
```
|
233 |
-
{
|
234 |
-
|
235 |
-
|
236 |
-
|
|
|
|
|
|
|
237 |
}
|
238 |
```
|
239 |
|
@@ -244,6 +247,8 @@ Indeed `-DOCSTART-` is a special line that acts as a boundary between two differ
|
|
244 |
### Data Fields
|
245 |
|
246 |
- `id`: id of the sample
|
|
|
|
|
247 |
- `tokens`: the tokens of the example text
|
248 |
- `ner_tags`: the NER tags of each token
|
249 |
- `pos_tags`: the POS tags of each token
|
|
|
230 |
The examples look like this :
|
231 |
|
232 |
```
|
233 |
+
{
|
234 |
+
'id': '0',
|
235 |
+
'document_id': 0,
|
236 |
+
'sentence_id': 0,
|
237 |
+
'tokens': ['Melbourne', '(', 'Australia', ')', ',', '25', 'may', '(', 'EFE', ')', '.'],
|
238 |
+
'pos_tags': [29, 21, 29, 22, 13, 59, 28, 21, 28, 22, 20],
|
239 |
+
'ner_tags': [5, 0, 5, 0, 0, 0, 0, 0, 3, 0, 0]
|
240 |
}
|
241 |
```
|
242 |
|
|
|
247 |
### Data Fields
|
248 |
|
249 |
- `id`: id of the sample
|
250 |
+
- `document_id`: an `int32` feature tracking which document the sample is from.
|
251 |
+
- `sentence_id`: an `int32` feature tracking which sentence in this document the sample is from.
|
252 |
- `tokens`: the tokens of the example text
|
253 |
- `ner_tags`: the NER tags of each token
|
254 |
- `pos_tags`: the POS tags of each token
|
conll2002.py
CHANGED
@@ -85,6 +85,8 @@ class Conll2002(datasets.GeneratorBasedBuilder):
|
|
85 |
features=datasets.Features(
|
86 |
{
|
87 |
"id": datasets.Value("string"),
|
|
|
|
|
88 |
"tokens": datasets.Sequence(datasets.Value("string")),
|
89 |
"pos_tags": datasets.Sequence(
|
90 |
datasets.features.ClassLabel(
|
@@ -197,18 +199,26 @@ class Conll2002(datasets.GeneratorBasedBuilder):
|
|
197 |
logger.info("⏳ Generating examples from = %s", filepath)
|
198 |
with open(filepath, encoding="utf-8") as f:
|
199 |
guid = 0
|
|
|
|
|
200 |
tokens = []
|
201 |
pos_tags = []
|
202 |
ner_tags = []
|
203 |
for line in f:
|
204 |
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
|
|
|
|
|
|
|
205 |
if tokens:
|
206 |
yield guid, {
|
207 |
"id": str(guid),
|
|
|
|
|
208 |
"tokens": tokens,
|
209 |
"pos_tags": pos_tags,
|
210 |
"ner_tags": ner_tags,
|
211 |
}
|
|
|
212 |
guid += 1
|
213 |
tokens = []
|
214 |
pos_tags = []
|
@@ -219,10 +229,13 @@ class Conll2002(datasets.GeneratorBasedBuilder):
|
|
219 |
tokens.append(splits[0])
|
220 |
pos_tags.append(splits[1])
|
221 |
ner_tags.append(splits[2].rstrip())
|
222 |
-
|
223 |
-
|
224 |
-
|
225 |
-
|
226 |
-
|
227 |
-
|
228 |
-
|
|
|
|
|
|
|
|
85 |
features=datasets.Features(
|
86 |
{
|
87 |
"id": datasets.Value("string"),
|
88 |
+
"document_id": datasets.Value("int32"),
|
89 |
+
"sentence_id": datasets.Value("int32"),
|
90 |
"tokens": datasets.Sequence(datasets.Value("string")),
|
91 |
"pos_tags": datasets.Sequence(
|
92 |
datasets.features.ClassLabel(
|
|
|
199 |
logger.info("⏳ Generating examples from = %s", filepath)
|
200 |
with open(filepath, encoding="utf-8") as f:
|
201 |
guid = 0
|
202 |
+
document_id = 0
|
203 |
+
sentence_id = 0
|
204 |
tokens = []
|
205 |
pos_tags = []
|
206 |
ner_tags = []
|
207 |
for line in f:
|
208 |
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
|
209 |
+
if line.startswith("-DOCSTART-"):
|
210 |
+
document_id += 1
|
211 |
+
sentence_id = 0
|
212 |
if tokens:
|
213 |
yield guid, {
|
214 |
"id": str(guid),
|
215 |
+
"document_id": document_id,
|
216 |
+
"sentence_id": sentence_id,
|
217 |
"tokens": tokens,
|
218 |
"pos_tags": pos_tags,
|
219 |
"ner_tags": ner_tags,
|
220 |
}
|
221 |
+
sentence_id += 1
|
222 |
guid += 1
|
223 |
tokens = []
|
224 |
pos_tags = []
|
|
|
229 |
tokens.append(splits[0])
|
230 |
pos_tags.append(splits[1])
|
231 |
ner_tags.append(splits[2].rstrip())
|
232 |
+
if tokens:
|
233 |
+
# last example
|
234 |
+
yield guid, {
|
235 |
+
"id": str(guid),
|
236 |
+
"document_id": document_id,
|
237 |
+
"sentence_id": sentence_id,
|
238 |
+
"tokens": tokens,
|
239 |
+
"pos_tags": pos_tags,
|
240 |
+
"ner_tags": ner_tags,
|
241 |
+
}
|