import numpy
from transformers import TokenClassificationPipeline

class UniversalDependenciesPipeline(TokenClassificationPipeline):
  def __init__(self,**kwargs):
    super().__init__(**kwargs)
    x=self.model.config.label2id
    # score masks over the label set: self.root keeps only "...|[root]"
    # labels, self.arc keeps every other dependency-relation label
    self.root=numpy.full((len(x)),-numpy.inf)
    self.arc=numpy.full((len(x)),-numpy.inf)
    for k,v in x.items():
      if k.endswith("|[root]"):
        self.root[v]=0
      elif k.endswith("]"):
        self.arc[v]=0
  def _forward(self,model_inputs):
    import torch
    v=model_inputs["input_ids"][0].tolist()
    if len(v)<91:
      # short input: one masked copy per token still fits the context window
      x=[True]*(len(v)-2)
    else:
      # long input: run a plain first pass, then decide which tokens get a
      # masked copy, spending the token budget on the least confident ones
      with torch.no_grad():
        e=self.model(input_ids=torch.tensor([v]).to(self.device))
      m=e.logits[0].cpu().numpy()
      e=numpy.exp(m-numpy.max(m,axis=-1,keepdims=True))
      z=e/e.sum(axis=-1,keepdims=True)
      k=numpy.argmax(m,axis=1).tolist()
      # a predicted UPOS ending with "." marks a non-initial subword token
      x=[not self.model.config.id2label[p].split("|")[0].endswith(".") for p in k[1:-1]]
      w=(sum([1 for b in x if b])+1)*(len(x)+1)+1
      for i in numpy.argsort([z[i+1,k[i+1]] for i in range(len(x))]):
        if w+len(x)>8191:
          break
        if not x[i]:
          x[i]=True
          w+=len(x)+1
    # append one copy of the sentence per selected token, with that token
    # replaced by the mask token
    ids=list(v)
    for i in range(len(x)):
      if x[i]:
        ids+=v[1:i+1]+[self.tokenizer.mask_token_id]+v[i+2:]
    with torch.no_grad():
      e=self.model(input_ids=torch.tensor([ids]).to(self.device))
    return {"logits":e.logits,"thin_out":x,**model_inputs}
  def check_model_type(self,supported_models):
    pass
  def postprocess(self,model_outputs,**kwargs):
    if "logits" not in model_outputs:
      return "".join(self.postprocess(x,**kwargs) for x in model_outputs)
    m=model_outputs["logits"][0].cpu().numpy()
    x=model_outputs["thin_out"]
    # e[i,j] holds label scores for the arc head i -> dependent j, read from
    # the copy in which token i was masked; the diagonal scores i as root
    e=numpy.full((len(x),len(x),m.shape[-1]),m.min())
    k=len(x)+2
    for i in range(len(x)):
      if x[i]:
        for j in range(len(x)):
          if i==j:
            e[i,i]=m[k]+self.root
          else:
            e[i,j]=m[k]+self.arc
          k+=1
        k+=1
    # restrict goeswith to chains of tokens immediately following their head
    g=self.model.config.label2id["X.|[goeswith]"]
    m,r=numpy.max(e,axis=2),numpy.tri(e.shape[0])
    for i in range(e.shape[0]):
      for j in range(i+2,e.shape[1]):
        r[i,j]=1
        if numpy.argmax(e[i,j-1])==g:
          if numpy.argmax(m[:,j-1])==i:
            r[i,j]=r[i,j-1]
    e[:,:,g]+=numpy.where(r==0,0,-numpy.inf)
    m,p=numpy.max(e,axis=2),numpy.argmax(e,axis=2)
    h=self.chu_liu_edmonds(m)
    # if several tokens picked themselves as root, keep the best-scoring one,
    # penalize the other self-loops, and re-run the tree search
    z=[i for i,j in enumerate(h) if i==j]
    if len(z)>1:
      k,h=z[numpy.argmax(m[z,z])],numpy.min(m)-numpy.max(m)
      m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
      h=self.chu_liu_edmonds(m)
    v=[(s,e) for s,e in model_outputs["offset_mapping"][0].tolist() if s<e]
    q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
    # merge goeswith dependents and overlapping spans into the preceding token
    for i,j in reversed(list(enumerate(q[1:],1))):
      if j[-1]=="[goeswith]":
        h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
        v[i-1]=(v[i-1][0],v.pop(i)[1])
        q.pop(i)
      elif v[i-1][1]>v[i][0]:
        h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
        v[i-1]=(v[i-1][0],v.pop(i)[1])
        q.pop(i)
    # render the parse in CoNLL-U format
    t=model_outputs["sentence"].replace("\n"," ")
    u="# text = "+t+"\n"
    for i,(s,e) in enumerate(v):
      u+="\t".join([str(i+1),t[s:e],"_",q[i][0].replace(".",""),"_","_" if len(q[i])<3 else "|".join(q[i][1:-1]),str(0 if h[i]==i else h[i]+1),q[i][-1][1:-1],"_","_" if i+1<len(v) and e<v[i+1][0] else "SpaceAfter=No"])+"\n"
    return u+"\n"
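  # --- reconstruction sketch ---------------------------------------------
  # The file is cut off before the chu_liu_edmonds method that postprocess
  # calls, so the method below is a sketch, not the author's original code:
  # a standard Chu-Liu/Edmonds maximum-spanning-arborescence search written
  # to match the calling convention used above, where matrix[i,j] scores the
  # arc head i -> dependent j, matrix[j,j] scores j as root, and the result
  # h satisfies h[j]==head of j, with h[j]==j marking a root.
  def chu_liu_edmonds(self,matrix):
    def find_cycle(h):
      # follow head pointers from every node; a node revisited on the
      # current path closes a cycle (a self-loop counts as a root instead)
      color=[0]*len(h)
      for s in range(len(h)):
        if color[s]:
          continue
        path,i=[],s
        while color[i]==0:
          color[i]=1
          path.append(i)
          if h[i]==i:
            break
          i=h[i]
        else:
          if color[i]==1:
            return path[path.index(i):]
        for j in path:
          color[j]=2
      return None
    def solve(s):
      h=numpy.argmax(s,axis=0).tolist()
      c=find_cycle(h)
      if c is None:
        return h
      # contract the cycle c into one super-node and recurse
      rest=[i for i in range(s.shape[0]) if i not in c]
      k=len(rest)
      r=numpy.empty((k+1,k+1))
      r[:k,:k]=s[numpy.ix_(rest,rest)]
      # best arc leaving the cycle towards each remaining node
      out_src=[c[int(numpy.argmax(s[c,v]))] for v in rest]
      r[k,:k]=[s[i,v] for i,v in zip(out_src,rest)]
      # best arc entering the cycle, priced as the gain over the internal
      # arc it replaces; making a cycle member the root is priced the same
      gain=s[numpy.ix_(rest,c)]-numpy.array([s[h[j],j] for j in c])
      in_dst=[c[int(numpy.argmax(gain[u]))] for u in range(k)]
      r[:k,k]=numpy.max(gain,axis=1)
      root_gain=[s[j,j]-s[h[j],j] for j in c]
      jr=c[int(numpy.argmax(root_gain))]
      r[k,k]=max(root_gain)
      hr=solve(r)
      # expand the contracted solution back to the original node set
      full=[0]*s.shape[0]
      for a,v in enumerate(rest):
        full[v]=v if hr[a]==a else (rest[hr[a]] if hr[a]<k else out_src[a])
      if hr[k]==k:
        enter=jr             # the cycle hosts the root
        full[enter]=enter
      else:
        enter=in_dst[hr[k]]  # the cycle is entered from outside at `enter`
        full[enter]=rest[hr[k]]
      for j in c:
        if j!=enter:
          full[j]=h[j]       # keep the remaining internal cycle arcs
      return full
    return solve(numpy.asarray(matrix))

# Usage sketch (the checkpoint name is a placeholder): any token-
# classification checkpoint trained with this "UPOS|FEATS|[deprel]" label
# scheme and shipped with a fast tokenizer should work, e.g.
#
#   from transformers import AutoTokenizer,AutoModelForTokenClassification
#   tkz=AutoTokenizer.from_pretrained("model-with-ud-embeds-labels")
#   mdl=AutoModelForTokenClassification.from_pretrained("model-with-ud-embeds-labels")
#   nlp=UniversalDependenciesPipeline(model=mdl,tokenizer=tkz)
#   print(nlp("Hello world"))  # CoNLL-U output, one line per word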