# msra_dev.py: Hugging Face loading script for the MSRA NER dataset.
import datasets
from datasets.download.download_manager import DownloadManager
import pyarrow.parquet as pq
_DESCRIPTION = """\
The MSRA NER dataset is a Chinese Named Entity Recognition dataset
"""
_CITATION = """\
@inproceedings{levow-2006-third,
title = "The Third International {C}hinese Language Processing Bakeoff: Word Segmentation and Named Entity Recognition",
author = "Levow, Gina-Anne",
booktitle = "Proceedings of the Fifth {SIGHAN} Workshop on {C}hinese Language Processing",
month = jul,
year = "2006",
address = "Sydney, Australia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W06-0115",
pages = "108--117",
}
"""

_URL = "https://huggingface.co/datasets/minskiter/msra_dev/resolve/main/"
_URLS = {
    "train": _URL + "data/train.parquet",
    "validation": _URL + "data/validation.parquet",
    "test": _URL + "data/test.parquet",
}


class MSRANamedEntities(datasets.GeneratorBasedBuilder):
    """Builder for the MSRA NER dataset, served from pre-built Parquet files."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # One token (character) per position, aligned with `labels`.
                    "text": datasets.Sequence(datasets.Value("string")),
                    # BMES-style tags: B/M/E mark the begin/middle/end of a
                    # multi-token entity, S marks a single-token entity.
                    "labels": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-NS",
                                "M-NS",
                                "E-NS",
                                "S-NS",
                                "B-NT",
                                "M-NT",
                                "E-NT",
                                "S-NT",
                                "B-NR",
                                "M-NR",
                                "E-NR",
                                "S-NR",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://aclanthology.org/W06-0115/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        # Download (and cache) the three Parquet shards from the Hub.
        download_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": download_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": download_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": download_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        # Stream the Parquet file in small batches to keep memory bounded;
        # each row already matches the feature schema declared in `_info`.
        _id = -1
        with open(filepath, "rb") as f:
            with pq.ParquetFile(f) as parquet_file:
                for batch in parquet_file.iter_batches(batch_size=64):
                    for row in batch.to_pylist():
                        _id += 1
                        yield _id, row
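

if __name__ == "__main__":
    # Minimal usage sketch, assuming the script is published under the Hub id
    # "minskiter/msra_dev" (as `_URL` suggests); recent `datasets` releases may
    # additionally require `trust_remote_code=True` for script-based datasets.
    ds = datasets.load_dataset("minskiter/msra_dev", split="validation")
    labels = ds.features["labels"].feature  # the ClassLabel declared in _info
    sample = ds[0]
    print("".join(sample["text"]))
    print([labels.int2str(tag) for tag in sample["labels"]])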