yuri-no committed on
Commit b6862f9 · verified · 1 Parent(s): 902ddb1

Upload full_eval.py

Files changed (1):
  full_eval.py (+305 -0)
full_eval.py ADDED
@@ -0,0 +1,305 @@
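+ """Distributed evaluation of a retrieval embedding model: embeds a corpus and
+ queries across ranks, then runs Tevatron FAISS retrieval and trec_eval.
+ 
+ A minimal sketch of the intended multi-GPU launch (the NCCL process group below
+ implies a launcher such as torchrun; the exact command and argument values are
+ assumptions, not part of this commit):
+ 
+     torchrun --nproc_per_node=2 full_eval.py \
+         --model <hf-model> --pooling eos \
+         --corpus <corpus-dataset> --queries <queries-dataset> \
+         --qrels <qrels-file> --output-dir <output-dir>
+ """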
+ import os, argparse, pickle, shutil, subprocess, warnings
+ from functools import partial
+ import torch
+ from torch import Tensor
+ import torch.nn.functional as F
+ from torch.nn.parallel import DistributedDataParallel as DDP  # used only by the commented-out DDP option below
+ import torch.distributed
+ import pathlib, datasets
+ import numpy as np
+ from transformers import AutoTokenizer, AutoModel
+ from datasets import load_dataset, concatenate_datasets
+ from sentence_transformers import SentenceTransformer
+ 
+ # ALL
+ DOC_ID_KEY = 'docid'
+ DOC_KEY = 'text'
+ QUERY_ID_KEY = 'query_id'
+ QUERY_KEY = 'query'
+ 
+ # MMARCO
+ # DOC_ID_KEY = 'id'
+ # DOC_KEY = 'text'
+ # QUERY_ID_KEY = 'id'
+ # QUERY_KEY = 'text'
+ 
+ 
+ def last_token_pool(last_hidden_states: Tensor,
+                     attention_mask: Tensor) -> Tensor:
+     # With left padding, the last position holds the final token for every row.
+     left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
+     if left_padding:
+         return last_hidden_states[:, -1]
+     else:
+         # With right padding, pick each row's last non-padding position.
+         sequence_lengths = attention_mask.sum(dim=1) - 1
+         batch_size = last_hidden_states.shape[0]
+         return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]
+ 
+ 
+ def average_pool(last_hidden_states: Tensor,
+                  attention_mask: Tensor) -> Tensor:
+     # Masked mean over the sequence dimension: zero out padding, then average.
+     last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
+     return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
+ 
+ 
+ def mean_pooling(model_output, attention_mask):
+     token_embeddings = model_output[0]  # first element of model_output contains all token embeddings
+     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
+     return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
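+ # Note: average_pool and mean_pooling compute the same masked mean; mean_pooling
+ # only adds a clamp guarding against all-padding rows. Which pooling ('eos',
+ # 'average', 'cls', 'mean') is appropriate depends on how the embedding model
+ # was trained.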
+ 
+ 
+ def embed_corpus(x, model, tokenizer, prefix, pooling, append_eos_token, sentence, max_length=512, normalize=True):
+     doc = x[DOC_KEY]
+     docid = x[DOC_ID_KEY]
+ 
+     doc = [f'{prefix}{d}' for d in doc]
+     if sentence:
+         # SentenceTransformer handles tokenization, pooling and normalization itself.
+         # `rank` is the module-level local rank set in __main__.
+         encoding = model.encode(doc, normalize_embeddings=True, batch_size=len(doc), device=rank)
+         return {
+             'encoding': encoding,
+             'id': docid
+         }
+ 
+     if not append_eos_token:
+         batch_dict = tokenizer(doc, max_length=max_length, padding=True, truncation=True, return_tensors='pt').to(rank)
+     else:
+         batch_dict = tokenizer([d + tokenizer.eos_token for d in doc], max_length=max_length, padding=True, truncation=True, return_tensors='pt').to(rank)
+ 
+     with torch.no_grad():
+         with torch.cuda.amp.autocast():
+             outputs = model(**batch_dict)
+             if pooling == 'eos':
+                 embeddings = last_token_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
+             elif pooling == 'average':
+                 embeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
+             elif pooling == 'cls':
+                 embeddings = outputs.last_hidden_state[:, 0]
+             elif pooling == 'mean':
+                 embeddings = mean_pooling(outputs, batch_dict['attention_mask'])
+             else:
+                 raise ValueError(f"Pooling not defined: {pooling}")
+             if normalize:
+                 encoding = F.normalize(embeddings, p=2, dim=1).cpu().detach().numpy()
+             else:
+                 encoding = embeddings.cpu().detach().numpy()
+     return {
+         'encoding': encoding,
+         'id': docid
+     }
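+ 
+ # Both embed_* functions are written for Dataset.map(batched=True): `x` is a
+ # dict mapping column names to lists of values for one batch, and the returned
+ # dict becomes the new 'encoding' and 'id' columns.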
+ 
+ 
+ def embed_queries(x, model, tokenizer, prefix, pooling, append_eos_token, sentence, max_length=512, normalize=True):
+     query = x[QUERY_KEY]
+     query_id = x[QUERY_ID_KEY]
+ 
+     query = [f'{prefix}{q}' for q in query]
+     if sentence:
+         encoding = model.encode(query, normalize_embeddings=True, batch_size=len(query), device=rank)
+         return {
+             'encoding': encoding,
+             'id': query_id
+         }
+ 
+     if not append_eos_token:
+         batch_dict = tokenizer(query, max_length=max_length, padding=True, truncation=True, return_tensors='pt').to(rank)
+     else:
+         batch_dict = tokenizer([q + tokenizer.eos_token for q in query], max_length=max_length, padding=True, truncation=True, return_tensors='pt').to(rank)
+ 
+     with torch.no_grad():
+         with torch.cuda.amp.autocast():
+             outputs = model(**batch_dict)
+             if pooling == 'eos':
+                 embeddings = last_token_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
+             elif pooling == 'average':
+                 embeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
+             elif pooling == 'cls':
+                 embeddings = outputs.last_hidden_state[:, 0]
+             elif pooling == 'mean':
+                 embeddings = mean_pooling(outputs, batch_dict['attention_mask'])
+             else:
+                 raise ValueError(f"Pooling not defined: {pooling}")
+             if normalize:
+                 encoding = F.normalize(embeddings, p=2, dim=1).cpu().detach().numpy()
+             else:
+                 encoding = embeddings.cpu().detach().numpy()
+ 
+     return {
+         'encoding': encoding,
+         'id': query_id
+     }
+ 
+ 
+ def distributed_embedding(ds, embed_function, batch_size, sort=True, value_to_sort='text'):
+ 
+     rank = torch.distributed.get_rank()
+ 
+     # Each worker embeds one contiguous shard and saves it to disk; once every
+     # worker has finished, all sub-shards are concatenated back together.
+     os.makedirs('./CACHE', exist_ok=True)
+     ds_shard_filepaths = [
+         os.path.join('./CACHE', f"{ds._fingerprint}_subshard_{w}.cache")
+         for w in range(0, world_size)
+     ]
+     print(f"\tworker {rank} saving sub-shard to {ds_shard_filepaths[rank]}")
+     ds_shard = ds.shard(
+         num_shards=world_size,
+         index=rank,
+         contiguous=True,
+     )
+ 
+     if sort:
+         # Sorting by length groups similarly sized inputs, reducing padding per batch.
+         ds_shard = ds_shard.map(lambda x: {'len': len(x[value_to_sort])}, num_proc=64)
+         ds_shard = ds_shard.sort('len', reverse=True)
+ 
+     # Drop the shard's current columns (including the temporary 'len') so only
+     # 'encoding' and 'id' remain.
+     ds_shard = ds_shard.map(embed_function, batched=True, batch_size=batch_size, remove_columns=ds_shard.column_names, load_from_cache_file=False)
+ 
+     ds_shard.save_to_disk(ds_shard_filepaths[rank])
+     print("rank", rank, "saving:", ds_shard_filepaths[rank])
+ 
+     torch.distributed.barrier()
+ 
+     full_dataset = concatenate_datasets(
+         [datasets.load_from_disk(p) for p in ds_shard_filepaths]
+     )
+ 
+     torch.distributed.barrier()
+     print("rank", rank, "deleting:", ds_shard_filepaths[rank])
+     shutil.rmtree(ds_shard_filepaths[rank])
+     return full_dataset
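+ # Example with world_size=2 and 10 documents: rank 0 embeds rows 0-4 and rank 1
+ # rows 5-9; after the barrier both ranks load both sub-shards and end up with
+ # the same 10-row (encoding, id) dataset, so rank 0 can safely pickle it alone.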
+ 
+ 
+ def main(rank, args):
+ 
+     warnings.filterwarnings('ignore')
+     datasets.logging.set_verbosity_error()
+ 
+     queries = load_dataset(args.queries)
+     queries = queries[args.queries_split]
+ 
+     batch_size = args.batch_size
+     corpus = load_dataset(args.corpus)
+     corpus = corpus[args.corpus_split]
+ 
+     if args.tokenizer is None:
+         tokenizer = AutoTokenizer.from_pretrained(args.model, trust_remote_code=True)
+     else:
+         tokenizer = AutoTokenizer.from_pretrained(args.tokenizer, trust_remote_code=True)
+ 
+     if args.sentence:
+         model = SentenceTransformer(args.model, device=rank)
+         model.max_seq_length = 512
+     else:
+         model = AutoModel.from_pretrained(args.model, trust_remote_code=True, attn_implementation="flash_attention_2" if 'gemma' in args.model else None, torch_dtype=torch.bfloat16).to(rank).eval()
+         # model = DDP(model, device_ids=[rank])
+ 
+     if rank == 0:
+         # Create output folder if not present
+         pathlib.Path(args.output_dir).mkdir(parents=True, exist_ok=True)
+         print("-" * 10)
+ 
+     print('CORPUS embedding...')
+     # Embed the corpus across all ranks
+     corpus_embedding = distributed_embedding(corpus, partial(embed_corpus, model=model, tokenizer=tokenizer, prefix=args.passage_prefix, pooling=args.pooling, append_eos_token=args.append_eos_token, sentence=args.sentence, normalize=args.normalize), batch_size=batch_size, sort=True, value_to_sort='text')
+     # Save corpus embeddings
+     if rank == 0:
+         print('Saving embedding...')
+         with open(os.path.join(args.output_dir, "corpus_emb.pkl"), 'wb') as f:
+             pickle.dump((np.asarray(corpus_embedding['encoding'], dtype=np.float32), corpus_embedding['id']), f)
+         print('Embedding saved!')
+ 
+     torch.distributed.barrier()
+     print('QUERIES embedding...')
+     # Embed the queries across all ranks
+     queries_embedding = distributed_embedding(queries, partial(embed_queries, model=model, tokenizer=tokenizer, prefix=args.query_prefix, pooling=args.pooling, append_eos_token=args.append_eos_token, sentence=args.sentence, normalize=args.normalize), batch_size=batch_size, sort=False)
+     # Save query embeddings
+     if rank == 0:
+         print('Saving embedding...')
+         with open(os.path.join(args.output_dir, "query_emb.pkl"), 'wb') as f:
+             pickle.dump((np.asarray(queries_embedding['encoding'], dtype=np.float32), queries_embedding['id']), f)
+         print('Embedding saved!')
+ 
+     # Retrieval and evaluation run on rank 0 only
+     if rank == 0:
+         print("-" * 10)
+ 
+         print('Retrieval...')
+         command = [
+             "/opt/conda/envs/retrieval/bin/python", "-m", "tevatron.faiss_retriever",
+             "--query_reps", os.path.join(args.output_dir, "query_emb.pkl"),
+             "--passage_reps", os.path.join(args.output_dir, "corpus_emb.pkl"),
+             "--depth", "100",
+             "--batch_size", "-1",
+             "--save_text",
+             "--save_ranking_to", os.path.join(args.output_dir, "rank.txt")
+         ]
+         proc = subprocess.run(command, capture_output=True, text=True)
+         print("Output:", proc.stdout)
+ 
+         print('Converting to MARCO format...')
+         command = [
+             "/opt/conda/envs/retrieval/bin/python", "-m", "tevatron.utils.format.convert_result_to_marco",
+             "--input", os.path.join(args.output_dir, "rank.txt"),
+             "--output", os.path.join(args.output_dir, "rank.txt.marco"),
+         ]
+         proc = subprocess.run(command, capture_output=True, text=True)
+         print("Output:", proc.stdout)
+ 
+         print("Computing metrics...")
+         command = [
+             "/opt/conda/envs/retrieval/bin/python", "-m", "pyserini.eval.trec_eval",
+             "-c", "-M", "100",
+             "-m", "ndcg_cut.10",
+             "-m", "recall.100",
+             "-m", "recip_rank",
+             args.qrels,
+             os.path.join(args.output_dir, "rank.txt.marco"),
+         ]
+         proc = subprocess.run(command, capture_output=True, text=True)
+         print("Output:", proc.stdout)
+ 
+     return 'Done'
+ 
+ 
+ if __name__ == "__main__":
+     warnings.filterwarnings('ignore')
+     # LOCAL_RANK is set by the launcher (e.g. torchrun); default to 0 for single-process runs
+     rank = int(os.environ.get('LOCAL_RANK', 0))
+     # Take the world size from the launcher environment rather than hard-coding it
+     torch.distributed.init_process_group("nccl", rank=rank, world_size=int(os.environ.get('WORLD_SIZE', 1)))
+ 
+     world_size = torch.distributed.get_world_size()
+     print('Initialized')
+ 
+     parser = argparse.ArgumentParser(description="Evaluation of retrieval embedding model")
+ 
+     # Required arguments specified as --options
+     parser.add_argument('--model', type=str, required=True, help='Model to evaluate')
+     parser.add_argument('--batch-size', type=int, required=False, default=256, help='Batch size for eval')
+     parser.add_argument('--pooling', type=str, required=True, help='Pooling to use (eos, average, cls, mean)')
+     parser.add_argument('--append-eos-token', action="store_true", required=False, default=False, help='Append the EOS token to passages and queries')
+     # type=bool would treat any non-empty string as True; BooleanOptionalAction keeps the True default and adds --no-normalize
+     parser.add_argument('--normalize', action=argparse.BooleanOptionalAction, required=False, default=True, help='L2-normalize embeddings')
+     parser.add_argument('--sentence', action="store_true", required=False, default=False, help='Encode with SentenceTransformer instead of AutoModel')
+ 
+     parser.add_argument('--tokenizer', type=str, required=False, default=None, help='Tokenizer of the model (defaults to --model)')
+ 
+     parser.add_argument('--corpus', type=str, required=True, help='Corpus dataset')
+     parser.add_argument('--corpus-split', type=str, required=False, default='dev', help='Corpus split')
+ 
+     parser.add_argument('--queries', type=str, required=True, help='Queries dataset')
+     parser.add_argument('--queries-split', type=str, required=False, default='dev', help='Queries split')
+ 
+     parser.add_argument('--query-prefix', type=str, required=False, default='query: ', help='Queries prefix')
+     parser.add_argument('--passage-prefix', type=str, required=False, default='passage: ', help='Passages prefix')
+ 
+     parser.add_argument('--output-dir', type=str, required=True, help='Directory where to save results')
+     parser.add_argument('--qrels', type=str, required=True, help='Path to qrels for evaluation')
+ 
+     args = parser.parse_args()
+ 
+     main(rank, args)
+     torch.distributed.destroy_process_group()
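+ 
+ # A minimal read-back sketch for the saved embeddings (mirrors the pickle.dump
+ # calls above; '<output-dir>' is a placeholder for the --output-dir argument):
+ #
+ #   import pickle
+ #   with open('<output-dir>/corpus_emb.pkl', 'rb') as f:
+ #       encodings, ids = pickle.load(f)  # float32 array (n_docs, dim), list of doc ids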