|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import json |
|
from collections import defaultdict |
|
|
|
import datasets |
|
import csv |
|
from trec_car import read_data |
|
|
|
|
|
# BibTeX entry for the TREC CAsT 2019 overview paper (arXiv:2003.13624).
_CITATION = """\

@misc{dalton2020trec,

title={TREC CAsT 2019: The Conversational Assistance Track Overview},

author={Jeffrey Dalton and Chenyan Xiong and Jamie Callan},

year={2020},

eprint={2003.13624},

archivePrefix={arXiv},

primaryClass={cs.IR}

}

"""


# Human-readable summary shown alongside the dataset.
_DESCRIPTION = """\

The Conversational Assistance Track (CAsT) is a new track for TREC 2019 to facilitate Conversational Information

Seeking (CIS) research and to create a large-scale reusable test collection for conversational search systems.

The document corpus is 38,426,252 passages from the TREC Complex Answer Retrieval (CAR) and Microsoft MAchine

Reading COmprehension (MARCO) datasets.

"""

_HOMEPAGE = "http://www.treccast.ai"

# No explicit license is published for this collection.
_LICENSE = ""

# Base URL of the Hugging Face repository hosting the topic and qrel files.
_URL = "https://huggingface.co/datasets/uva-irlab/trec-cast-2019-multi-turn/resolve/main/"

# Per-config download locations. 'test_collection' pulls the raw passage
# archives from their original hosts (TREC CAR and MS MARCO).
_URLs = {

    'topics': _URL+"cast2019_test_annotated_without_context.tsv",

    'topics_with_context': _URL+"cast2019_test_annotated_with_context.tsv",

    'qrels': _URL+"2019qrels.txt",

    'test_collection': {

        'car': "http://trec-car.cs.unh.edu/datareleases/v2.0/paragraphCorpus.v2.0.tar.xz",

        'msmarco': 'https://msmarco.blob.core.windows.net/msmarcoranking/collection.tar.gz',

    },

}

# Cap on the number of passages taken from *each* corpus (CAR and MS MARCO)
# by the 'test_collection_sample' config — see _generate_examples.
SAMPLE_SIZE = 100000
|
|
|
|
|
class TrecCast2019MultiTurn(datasets.GeneratorBasedBuilder):
    """Dataset builder for TREC CAsT 2019 multi-turn conversational search.

    Configs:
      * ``topics`` / ``topics_with_context`` — annotated test queries with,
        per query, the earlier turns of the same conversation as ``history``.
      * ``qrels`` — relevance judgments grouped by query id.
      * ``test_collection`` / ``test_collection_sample`` — the passage corpus
        built from TREC CAR and MS MARCO (the sample takes at most
        ``SAMPLE_SIZE`` passages from each corpus).
    """

    VERSION = datasets.Version("1.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="qrels",
                               version=VERSION,
                               description=""),
        datasets.BuilderConfig(name="topics",
                               version=VERSION,
                               description="The topics contain the queries, query IDs and their history."),
        datasets.BuilderConfig(name="topics_with_context",
                               version=VERSION,
                               description="The topics contain the queries with relevant terms from the history, query IDs and their history."),
        datasets.BuilderConfig(name="test_collection",
                               version=VERSION,
                               description="The test collection will provide the passages of TREC CAR and MSMARCO"),
        # Description is derived from SAMPLE_SIZE; it previously said "20000",
        # contradicting the actual cap of 100000 per corpus.
        datasets.BuilderConfig(name="test_collection_sample",
                               version=VERSION,
                               description=f"A small sample of {SAMPLE_SIZE} of the test collection passages per corpus."),
    ]

    DEFAULT_CONFIG_NAME = "test_collection"

    def _info(self):
        """Return DatasetInfo whose features and download size depend on the config."""
        if self.config.name in ("topics", "topics_with_context"):
            # Both topic variants share the same schema; only the query text
            # (with or without resolved context) and the file size differ.
            features = datasets.Features({
                "qid": datasets.Value("string"),
                "history": datasets.features.Sequence(feature=datasets.Value('string')),
                "query": datasets.Value("string"),
            })
            download_size = 6784 if self.config.name == "topics" else 8010
        elif self.config.name == "qrels":
            features = datasets.Features({
                "qid": datasets.Value("string"),
                "qrels": datasets.features.Sequence(feature=datasets.Features({
                    'docno': datasets.Value("string"),
                    'relevance': datasets.Value("string"),
                })),
            })
            download_size = 1138032
        else:
            # test_collection / test_collection_sample: CAR + MARCO archives.
            features = datasets.Features({
                "docno": datasets.Value("string"),
                "text": datasets.Value("string"),
            })
            download_size = 5085726092 + 1035009698
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            download_size=download_size,
        )

    def _split_generators(self, dl_manager):
        """Download this config's files and expose them as a single TEST split."""
        # The sample config reuses the full test-collection download; sampling
        # happens at generation time.
        urlkey = 'test_collection' if self.config.name == 'test_collection_sample' else self.config.name
        downloaded_files = dl_manager.download_and_extract(_URLs[urlkey])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "file": downloaded_files,
                    "split": self.config.name,
                },
            ),
        ]

    def _generate_examples(self, file, split):
        """Yield (key, example) tuples; dispatches on the config name in *split*."""
        if split == 'qrels':
            yield from self._generate_qrels(file)
        elif split in ('topics', 'topics_with_context'):
            yield from self._generate_topics(file)
        elif split in ('test_collection', 'test_collection_sample'):
            yield from self._generate_passages(file, split == 'test_collection_sample')
        else:
            raise NotImplementedError(f"'{split}' is not yet implemented")

    @staticmethod
    def _generate_qrels(path):
        """Parse a space-separated TREC qrels file and group judgments by qid."""
        qrels = defaultdict(list)
        # 'with' closes the handle deterministically (the original leaked it),
        # and the encoding is pinned so the locale cannot affect decoding.
        with open(path, encoding="utf-8") as fh:
            for row in csv.reader(fh, delimiter=" "):
                # TREC qrels row: qid, iteration (ignored), docno, relevance.
                qrels[row[0]].append({'docno': row[2], 'relevance': row[3]})
        for qid, judgments in qrels.items():
            yield qid, {'qid': qid, 'qrels': judgments}

    @staticmethod
    def _generate_topics(path):
        """Parse a TSV of (qid, query) rows.

        qids look like '<conversation>_<turn>'; each emitted example carries
        the earlier queries of its conversation as ``history``.
        """
        topics = defaultdict(list)
        with open(path, encoding="utf-8") as fh:
            for qid, query in csv.reader(fh, delimiter="\t"):
                conversation_id = qid.split('_')[0]
                topics[conversation_id].append(query)

        for conversation_id, queries in topics.items():
            for idx, query in enumerate(queries):
                qid = f"{conversation_id}_{idx + 1}"
                yield qid, {'query': query, 'history': queries[:idx], 'qid': qid}

    @staticmethod
    def _generate_passages(file, is_sample):
        """Yield passages from the TREC CAR corpus, then from MS MARCO.

        When *is_sample* is true, at most SAMPLE_SIZE passages are taken
        from each of the two corpora.
        """
        car_file = file['car'] + "/paragraphCorpus/dedup.articles-paragraphs.cbor"
        msmarco_file = file['msmarco'] + "/collection.tsv"

        count = 0
        with open(car_file, 'rb') as f:
            for para in read_data.iter_paragraphs(f):
                docid = f"CAR_{para.para_id}"
                yield docid, {"docno": docid, "text": para.get_text()}
                count += 1
                if is_sample and count >= SAMPLE_SIZE:
                    break

        count = 0
        with open(msmarco_file, encoding="utf-8") as f:
            for docid, text in csv.reader(f, delimiter="\t"):
                docid = f"MARCO_{docid}"
                yield docid, {"docno": docid, "text": text}
                count += 1
                if is_sample and count >= SAMPLE_SIZE:
                    break
|
|