Convert dataset to Parquet

#5
by albertvillanova (HF staff) — opened
README.md CHANGED
@@ -1,5 +1,4 @@
1
  ---
2
- pretty_name: IMDB
3
  annotations_creators:
4
  - expert-generated
5
  language_creators:
@@ -19,6 +18,40 @@ task_categories:
19
  task_ids:
20
  - sentiment-classification
21
  paperswithcode_id: imdb-movie-reviews
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  train-eval-index:
23
  - config: plain_text
24
  task: text-classification
@@ -68,29 +101,6 @@ train-eval-index:
68
  name: Recall weighted
69
  args:
70
  average: weighted
71
- dataset_info:
72
- features:
73
- - name: text
74
- dtype: string
75
- - name: label
76
- dtype:
77
- class_label:
78
- names:
79
- 0: neg
80
- 1: pos
81
- config_name: plain_text
82
- splits:
83
- - name: train
84
- num_bytes: 33432835
85
- num_examples: 25000
86
- - name: test
87
- num_bytes: 32650697
88
- num_examples: 25000
89
- - name: unsupervised
90
- num_bytes: 67106814
91
- num_examples: 50000
92
- download_size: 84125825
93
- dataset_size: 133190346
94
  ---
95
 
96
  # Dataset Card for "imdb"
 
1
  ---
 
2
  annotations_creators:
3
  - expert-generated
4
  language_creators:
 
18
  task_ids:
19
  - sentiment-classification
20
  paperswithcode_id: imdb-movie-reviews
21
+ pretty_name: IMDB
22
+ dataset_info:
23
+ config_name: plain_text
24
+ features:
25
+ - name: text
26
+ dtype: string
27
+ - name: label
28
+ dtype:
29
+ class_label:
30
+ names:
31
+ '0': neg
32
+ '1': pos
33
+ splits:
34
+ - name: train
35
+ num_bytes: 33432823
36
+ num_examples: 25000
37
+ - name: test
38
+ num_bytes: 32650685
39
+ num_examples: 25000
40
+ - name: unsupervised
41
+ num_bytes: 67106794
42
+ num_examples: 50000
43
+ download_size: 83446840
44
+ dataset_size: 133190302
45
+ configs:
46
+ - config_name: plain_text
47
+ data_files:
48
+ - split: train
49
+ path: plain_text/train-*
50
+ - split: test
51
+ path: plain_text/test-*
52
+ - split: unsupervised
53
+ path: plain_text/unsupervised-*
54
+ default: true
55
  train-eval-index:
56
  - config: plain_text
57
  task: text-classification
 
101
  name: Recall weighted
102
  args:
103
  average: weighted
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
  ---
105
 
106
  # Dataset Card for "imdb"
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"plain_text": {"description": "Large Movie Review Dataset.\nThis is a dataset for binary sentiment classification containing substantially more data than previous benchmark datasets. We provide a set of 25,000 highly polar movie reviews for training, and 25,000 for testing. There is additional unlabeled data for use as well.", "citation": "@InProceedings{maas-EtAl:2011:ACL-HLT2011,\n author = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. and Potts, Christopher},\n title = {Learning Word Vectors for Sentiment Analysis},\n booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies},\n month = {June},\n year = {2011},\n address = {Portland, Oregon, USA},\n publisher = {Association for Computational Linguistics},\n pages = {142--150},\n url = {http://www.aclweb.org/anthology/P11-1015}\n}\n", "homepage": "http://ai.stanford.edu/~amaas/data/sentiment/", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["neg", "pos"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "text-classification", "text_column": "text", "label_column": "label", "labels": ["neg", "pos"]}], "builder_name": "imdb", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 33432835, "num_examples": 25000, "dataset_name": "imdb"}, "test": {"name": "test", "num_bytes": 32650697, "num_examples": 25000, "dataset_name": "imdb"}, "unsupervised": {"name": "unsupervised", "num_bytes": 67106814, "num_examples": 50000, "dataset_name": "imdb"}}, "download_checksums": {"http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz": {"num_bytes": 84125825, "checksum": 
"c40f74a18d3b61f90feba1e17730e0d38e8b97c05fde7008942e91923d1658fe"}}, "download_size": 84125825, "post_processing_size": null, "dataset_size": 133190346, "size_in_bytes": 217316171}}
 
 
imdb.py DELETED
@@ -1,111 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """IMDB movie reviews dataset."""
18
-
19
- import datasets
20
- from datasets.tasks import TextClassification
21
-
22
-
23
- _DESCRIPTION = """\
24
- Large Movie Review Dataset.
25
- This is a dataset for binary sentiment classification containing substantially \
26
- more data than previous benchmark datasets. We provide a set of 25,000 highly \
27
- polar movie reviews for training, and 25,000 for testing. There is additional \
28
- unlabeled data for use as well.\
29
- """
30
-
31
- _CITATION = """\
32
- @InProceedings{maas-EtAl:2011:ACL-HLT2011,
33
- author = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. and Potts, Christopher},
34
- title = {Learning Word Vectors for Sentiment Analysis},
35
- booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies},
36
- month = {June},
37
- year = {2011},
38
- address = {Portland, Oregon, USA},
39
- publisher = {Association for Computational Linguistics},
40
- pages = {142--150},
41
- url = {http://www.aclweb.org/anthology/P11-1015}
42
- }
43
- """
44
-
45
- _DOWNLOAD_URL = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
46
-
47
-
48
- class IMDBReviewsConfig(datasets.BuilderConfig):
49
- """BuilderConfig for IMDBReviews."""
50
-
51
- def __init__(self, **kwargs):
52
- """BuilderConfig for IMDBReviews.
53
-
54
- Args:
55
- **kwargs: keyword arguments forwarded to super.
56
- """
57
- super(IMDBReviewsConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
58
-
59
-
60
- class Imdb(datasets.GeneratorBasedBuilder):
61
- """IMDB movie reviews dataset."""
62
-
63
- BUILDER_CONFIGS = [
64
- IMDBReviewsConfig(
65
- name="plain_text",
66
- description="Plain text",
67
- )
68
- ]
69
-
70
- def _info(self):
71
- return datasets.DatasetInfo(
72
- description=_DESCRIPTION,
73
- features=datasets.Features(
74
- {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=["neg", "pos"])}
75
- ),
76
- supervised_keys=None,
77
- homepage="http://ai.stanford.edu/~amaas/data/sentiment/",
78
- citation=_CITATION,
79
- task_templates=[TextClassification(text_column="text", label_column="label")],
80
- )
81
-
82
- def _split_generators(self, dl_manager):
83
- archive = dl_manager.download(_DOWNLOAD_URL)
84
- return [
85
- datasets.SplitGenerator(
86
- name=datasets.Split.TRAIN, gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "train"}
87
- ),
88
- datasets.SplitGenerator(
89
- name=datasets.Split.TEST, gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "test"}
90
- ),
91
- datasets.SplitGenerator(
92
- name=datasets.Split("unsupervised"),
93
- gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "train", "labeled": False},
94
- ),
95
- ]
96
-
97
- def _generate_examples(self, files, split, labeled=True):
98
- """Generate aclImdb examples."""
99
- # For labeled examples, extract the label from the path.
100
- if labeled:
101
- label_mapping = {"pos": 1, "neg": 0}
102
- for path, f in files:
103
- if path.startswith(f"aclImdb/{split}"):
104
- label = label_mapping.get(path.split("/")[2])
105
- if label is not None:
106
- yield path, {"text": f.read().decode("utf-8"), "label": label}
107
- else:
108
- for path, f in files:
109
- if path.startswith(f"aclImdb/{split}"):
110
- if path.split("/")[2] == "unsup":
111
- yield path, {"text": f.read().decode("utf-8"), "label": -1}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
plain_text/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b52e26e2f872d282ffac460bf9770b25ac6f102cda0e6ca7158df98c94e8b3da
3
+ size 20470363
plain_text/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db47d16b5c297cc0dd625e519c81319c24c9149e70e8496de5475f6fa928342c
3
+ size 20979968
plain_text/unsupervised-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74d14fbfcbb39fb7d299c38ca9f0ae6d231bf97108da85d620027ba437b6d52e
3
+ size 41996509