Update xor-tydi.py
xor-tydi.py  CHANGED  (+34 -18)
@@ -33,17 +33,14 @@ _DESCRIPTION = "dataset load script for Wikipedia NQ"
 
 base = "/home/czhang/src/task-sparse/tevatron/hgf_datasets/xor-tydi"
 _DATASET_URLS = {
-    '
-        'train': f'https://huggingface.co/datasets/crystina-z/xor-tydi/resolve/main/train/
+    'targetQ': {
+        'train': f'https://huggingface.co/datasets/crystina-z/xor-tydi/resolve/main/train/targetL_dpr_train_data.json',
         'dev': f'https://huggingface.co/datasets/crystina-z/xor-tydi/resolve/main/dev/xor_dev_retrieve_eng_span_v1_1.jsonl',
         'test': f'https://huggingface.co/datasets/crystina-z/xor-tydi/resolve/main/test/xor_test_retrieve_eng_span_q_only_v1_1.jsonl',
     },
-    '
-        'train': f'https://huggingface.co/datasets/crystina-z/xor-tydi/resolve/main/train/
-        'dev': f'https://huggingface.co/datasets/crystina-z/xor-tydi/resolve/main/dev/xor_dev_full_v1_1.jsonl',
-        'test': f'https://huggingface.co/datasets/crystina-z/xor-tydi/resolve/main/test/xor_test_full_q_only_v1_1.jsonl',
+    'engQ': {
+        'train': f'https://huggingface.co/datasets/crystina-z/xor-tydi/resolve/main/train/EN_dpr_train_data.json',
     }
-    # 'test': f"{base}",
 }
 
 
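The restructured _DATASET_URLS groups the files under two keys, 'targetQ' and 'engQ'. Assuming the builder exposes these keys as config names (the BuilderConfig definitions are not part of this diff, so that is an assumption), selecting a variant would look roughly like the sketch below; note that 'engQ' only defines a 'train' entry here.

# Hypothetical usage sketch; config names 'targetQ'/'engQ' are assumed to
# mirror the _DATASET_URLS keys above.
import datasets

engq_train = datasets.load_dataset("crystina-z/xor-tydi", "engQ", split="train")
targetq_dev = datasets.load_dataset("crystina-z/xor-tydi", "targetQ", split="dev")
print(engq_train[0]["query_id"], engq_train[0]["query"])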
@@ -107,15 +104,29 @@ class XORTyDi(datasets.GeneratorBasedBuilder):
 
     def _generate_examples(self, files):
         """Yields examples."""
+        # def process_train_entry(data):
+        #     if data.get('negative_passages') is None:
+        #         data['negative_passages'] = []
+        #     if data.get('positive_passages') is None:
+        #         data['positive_passages'] = []
+        #     if data.get('answers') is None:
+        #         data['answers'] = []
+        #     return data['query_id'], data
+
         def process_train_entry(data):
-            if data.get('negative_passages') is None:
-                data['negative_passages'] = []
-            if data.get('positive_passages') is None:
-                data['positive_passages'] = []
-            if data.get('answers') is None:
-                data['answers'] = []
-            return data['query_id'], data
-
+            positive_ctxs = data["positive_ctxs"]
+            hard_negative_ctxs = data["hard_negative_ctxs"]
+            # each ctx: {'title':... , 'text': ....}
+
+
+            return data["id"], {
+                "query_id": data["id"],
+                "query": data["question"],
+                "answers": data.get("answers", []),
+                "positive_passages": [{**doc, 'docid': f'pos-{i}-{random.randint()}'} for i, doc in enumerate(positive_ctxs)],
+                "negative_passages": [{**doc, 'docid': f'neg-{i}-{random.randint()}'} for i, doc in enumerate(hard_negative_ctxs)],
+            }
+
         def process_dev_test_entry(data):
             return data["id"], {
                 "query_id": data["id"],
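One thing to note about the new process_train_entry: random.randint() is called with no arguments, but random.randint(a, b) requires both bounds, so building the docid suffix would raise a TypeError at runtime. A minimal runnable sketch of the same docid scheme with an explicit (arbitrarily chosen) upper bound:

import random

def process_train_entry(data):
    # Same structure as the hunk above; only the randint call is given bounds
    # (0 .. 10**8 is an arbitrary choice) so the docid suffix can be generated.
    positive_ctxs = data["positive_ctxs"]            # each ctx: {'title': ..., 'text': ...}
    hard_negative_ctxs = data["hard_negative_ctxs"]
    return data["id"], {
        "query_id": data["id"],
        "query": data["question"],
        "answers": data.get("answers", []),
        "positive_passages": [{**doc, "docid": f"pos-{i}-{random.randint(0, 10**8)}"}
                              for i, doc in enumerate(positive_ctxs)],
        "negative_passages": [{**doc, "docid": f"neg-{i}-{random.randint(0, 10**8)}"}
                              for i, doc in enumerate(hard_negative_ctxs)],
    }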
@@ -125,13 +136,18 @@ class XORTyDi(datasets.GeneratorBasedBuilder):
                 "negative_passages": [],
             }
 
-
+        assert len(files) == 1
+        filepath = files[0]
+        if filepath.endswith(".jsonl"):
             with open(filepath, encoding="utf-8") as f:
                 for line in f:
                     data = json.loads(line)
 
                     if "id" in data and "query_id" not in data:
                         yield process_dev_test_entry(data)
-
-
+        else:
+            with open(filepath, encoding="utf-8") as f:
+                all_data = json.load(f)
+                for data in all_data:
+                    yield process_train_entry(data)
 
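The extension check in _generate_examples implies two input shapes: train files are a single JSON array of DPR-style records (read with json.load and routed to process_train_entry), while dev/test files are JSONL with one query per line (read with json.loads and routed to process_dev_test_entry when a record has an "id" but no "query_id"). An illustrative sketch of the two shapes; the values are made up and only the keys used in the hunks above are grounded:

import json

# DPR-style train file: a JSON array of entries shaped like this one.
train_entry = {
    "id": "q-0",                                          # illustrative id
    "question": "example question?",
    "answers": ["example answer"],
    "positive_ctxs": [{"title": "t1", "text": "relevant passage"}],
    "hard_negative_ctxs": [{"title": "t2", "text": "non-relevant passage"}],
}

# Dev/test file: one JSON object per line, carrying "id" but no "query_id";
# other fields are elided here because they are not visible in this diff.
dev_test_line = json.dumps({"id": "q-1"})
print(json.loads(dev_test_line)["id"])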