Commit d430949
Parent(s): ad0b1f8
Host pn_summary data on the Hub instead of Google Drive (#4586)
* Replace data URL with Hub one
* Update metadata JSON
* Update documentation card
* Update dummy data
Commit from https://github.com/huggingface/datasets/commit/6d2a970f25447a204690ab9404d85559cf43f51b
- README.md +0 -1
- dataset_infos.json +1 -1
- dummy/1.0.0/1.0.0/dummy_data.zip +2 -2
- pn_summary.py +2 -1
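
For context, this commit only swaps the download location; consumers keep loading the dataset through the `datasets` library exactly as before. A minimal sketch, assuming the `pn_summary` script and its `1.0.0` config as recorded in the metadata below:

from datasets import load_dataset

# The loader call is unchanged by this commit; only the archive it downloads
# now resolves from the Hub instead of Google Drive.
dataset = load_dataset("pn_summary", "1.0.0")

# Split sizes as recorded in dataset_infos.json below.
print(dataset["train"].num_rows)       # 82022
print(dataset["validation"].num_rows)  # 5592
print(dataset["test"].num_rows)        # 5593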
README.md
CHANGED
@@ -53,7 +53,6 @@ pretty_name: Persian News Summary (PnSummary)
 
 ## Dataset Description
 
-- **Homepage:** https://github.com/hooshvare/pn-summary/
 - **Repository:** https://github.com/hooshvare/pn-summary/
 - **Paper:** https://arxiv.org/abs/2012.11204
 - **Leaderboard:** [More Information Needed]
dataset_infos.json
CHANGED
@@ -1 +1 @@
-{"1.0.0": {"description": "A well-structured summarization dataset for the Persian language consists of 93,207 records. It is prepared for Abstractive/Extractive tasks (like cnn_dailymail for English). It can also be used in other scopes like Text Generation, Title Generation, and News Category Classification.\nIt is imperative to consider that the newlines were replaced with the `[n]` symbol. Please interpret them into normal newlines (for ex. `t.replace(\"[n]\", \"\n\")`) and then use them for your purposes.\n", "citation": "@article{pnSummary, title={Leveraging ParsBERT and Pretrained mT5 for Persian Abstractive Text Summarization},\nauthor={Mehrdad Farahani, Mohammad Gharachorloo, Mohammad Manthouri},\nyear={2020},\neprint={2012.11204},\narchivePrefix={arXiv},\nprimaryClass={cs.CL}\n}\n", "homepage": "https://github.com/hooshvare/pn-summary", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "category": {"num_classes": 18, "names": ["Economy", "Roads-Urban", "Banking-Insurance", "Agriculture", "International", "Oil-Energy", "Industry", "Transportation", "Science-Technology", "Local", "Sports", "Politics", "Art-Culture", "Society", "Health", "Research", "Education-University", "Tourism"], "
+{"1.0.0": {"description": "A well-structured summarization dataset for the Persian language consists of 93,207 records. It is prepared for Abstractive/Extractive tasks (like cnn_dailymail for English). It can also be used in other scopes like Text Generation, Title Generation, and News Category Classification.\nIt is imperative to consider that the newlines were replaced with the `[n]` symbol. Please interpret them into normal newlines (for ex. `t.replace(\"[n]\", \"\n\")`) and then use them for your purposes.\n", "citation": "@article{pnSummary, title={Leveraging ParsBERT and Pretrained mT5 for Persian Abstractive Text Summarization},\nauthor={Mehrdad Farahani, Mohammad Gharachorloo, Mohammad Manthouri},\nyear={2020},\neprint={2012.11204},\narchivePrefix={arXiv},\nprimaryClass={cs.CL}\n}\n", "homepage": "https://github.com/hooshvare/pn-summary", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "category": {"num_classes": 18, "names": ["Economy", "Roads-Urban", "Banking-Insurance", "Agriculture", "International", "Oil-Energy", "Industry", "Transportation", "Science-Technology", "Local", "Sports", "Politics", "Art-Culture", "Society", "Health", "Research", "Education-University", "Tourism"], "id": null, "_type": "ClassLabel"}, "categories": {"dtype": "string", "id": null, "_type": "Value"}, "network": {"num_classes": 6, "names": ["Tahlilbazaar", "Imna", "Shana", "Mehr", "Irna", "Khabaronline"], "id": null, "_type": "ClassLabel"}, "link": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "pn_summary", "config_name": "1.0.0", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 309436493, "num_examples": 82022, "dataset_name": "pn_summary"}, "validation": {"name": "validation", "num_bytes": 21311817, "num_examples": 5592, "dataset_name": "pn_summary"}, "test": {"name": "test", "num_bytes": 20936820, "num_examples": 5593, "dataset_name": "pn_summary"}}, "download_checksums": {"https://huggingface.co/datasets/pn_summary/resolve/main/data/pn_summary.zip": {"num_bytes": 89591141, "checksum": "49aa6a5fdb11244714f9bbe69517f2079ab934c9c565e272a977fbd8d2d404f7"}}, "download_size": 89591141, "post_processing_size": null, "dataset_size": 351685130, "size_in_bytes": 441276271}}
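
The description embedded in this metadata notes that line breaks in the text were replaced with the `[n]` token and suggests converting them back with `replace`. A minimal sketch of that post-processing step, assuming the `article` and `summary` fields are the ones carrying the markers:

from datasets import load_dataset

def restore_newlines(example):
    # The dataset stores line breaks as the literal token "[n]"; convert them
    # back to real newlines, as recommended in the dataset description.
    example["article"] = example["article"].replace("[n]", "\n")
    example["summary"] = example["summary"].replace("[n]", "\n")
    return example

dataset = load_dataset("pn_summary", "1.0.0")
dataset = dataset.map(restore_newlines)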
dummy/1.0.0/1.0.0/dummy_data.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4bef6326580c30ece8de2d34fa2bc6fe261432d837dd8cbee75a70de12a820ab
+size 12694
pn_summary.py
CHANGED
@@ -39,9 +39,10 @@ It is imperative to consider that the newlines were replaced with the `[n]` symb
 _HOMEPAGE = "https://github.com/hooshvare/pn-summary"
 _LICENSE = "MIT License"
 
+_REPO = "https://huggingface.co/datasets/pn_summary/resolve/main/data"
 _URLs = {
     "1.0.0": {
-        "data": "
+        "data": f"{_REPO}/pn_summary.zip",
         "features": [
             {"name": "id", "type": datasets.Value("string")},
             {"name": "title", "type": datasets.Value("string")},