from transformers import BertTokenizerFast,AutoModel
from KModel import BertLSTM
import torch

# Extract keywords (attribute / viewpoint spans) from a sentence
# using a BERT+LSTM sequence tagger.
class kwinference():
    def __init__(self,needEmbed=True,model_path = '/Users/xietongxue/code/python/nlp/backend/ac96.pth') -> None:
        '''
        needEmbed: when True, __call__ returns {keyword: embedding} dicts;
                   when False, plain lists of keyword strings.
        model_path: location of the trained model parameters (state dict).
        '''
        self.flag = needEmbed
        self.pretrained_model = "bert-base-chinese"
        transformer = AutoModel.from_pretrained(self.pretrained_model)
        num_cls = 5   # number of tag classes emitted by the tagger
        freeze = True  # keep the BERT weights frozen inside BertLSTM
        num_hiddens,num_layers = 125,2
        self.model = BertLSTM(transformer, num_hiddens,num_layers, freeze,num_cls)
        # map_location forces the checkpoint onto CPU regardless of where it was trained
        self.model.load_state_dict(torch.load(model_path,map_location=torch.device('cpu')))
        self.tokenizer = BertTokenizerFast.from_pretrained(self.pretrained_model)

    # Tokenize one sentence into model inputs.
    def dataprocess(self,sentence):
        '''Return (input_ids, attention_mask) tensors padded/truncated to 512.'''
        encoded_input = self.tokenizer.encode_plus(
                sentence, max_length=512, padding="max_length", truncation=True, return_tensors="pt"
            )

        input_ids = encoded_input['input_ids']
        attention_mask = encoded_input['attention_mask']

        return input_ids,attention_mask

    def extrac_key(self,sentence):
        '''
        Tag the sentence and extract keyword spans.

        Returns (attr_name, veiw_name, positions):
          attr_name: {attribute keyword: averaged embedding tensor}
          veiw_name: {viewpoint keyword: averaged embedding tensor}
          positions: (start, end) character offsets of the attribute keywords
        '''
        data_input = self.dataprocess(sentence)
        with torch.no_grad():
            embeding,logit = self.model(*data_input)
        out = logit.squeeze(0).argmax(-1)
        # Keep only real tokens, then drop [CLS]/[SEP] so labels align with characters.
        mask = data_input[1].to(torch.bool).squeeze(0)
        Tout = out[mask][1:-1]      # per-character predicted labels

        # Collect spans labelled start,end,end,... as keywords.
        # NOTE(review): sentence[i:end_idx+1] assumes one token per character
        # (holds for bert-base-chinese on Chinese text) — confirm for other inputs.
        def extrac_name(start,end):
            begin_idx = torch.arange(len(Tout))[Tout==start].tolist()
            attri_name = {}
            positions = []
            for i in begin_idx:
                end_idx = i
                # Extend the span while the continuation label matches.
                for j in range(i+1,len(Tout)):
                    if Tout[j].item() != end:
                        break
                    end_idx+=1
                # BUGFIX: the original recorded a span only when a non-matching
                # label followed it, silently dropping any keyword that runs to
                # the end of the tagged sequence. Record unconditionally here.
                # Keyword -> averaged contextual embedding (+1 offsets skip [CLS]).
                # NOTE(review): divides by the padded sequence length
                # (embeding.shape[1]) rather than the span length — kept as-is
                # to match whatever downstream code was trained against; verify intent.
                attri_name[sentence[i:end_idx+1]] = embeding[0][i+1:end_idx+2].sum(dim=0)/embeding.shape[1]
                positions.append((i, end_idx))
            return attri_name, positions

        attr_name,positions = extrac_name(3,4)  # labels 3/4: attribute begin/inside
        veiw_name,_ = extrac_name(0,1)          # labels 0/1: viewpoint begin/inside
        return attr_name,veiw_name,positions

    def __call__(self, sentence):
        attr,view,positions = self.extrac_key(sentence)
        if self.flag:
            return attr,view,positions
        else:
            return list(attr.keys()),list(view.keys()),positions

# Quick manual check: run the extractor on a sample product review.
if __name__ == '__main__':
    kw = kwinference(needEmbed=False)
    review = '成分很安全的一款面膜，对香精敏感的美眉慎入，作为日常补水款足够了。滋润，不油腻，味道也喜欢。一直在用，感觉还是不错，这次还有小礼品，谢谢啦！还有快递小哥，今天下了雪如期送达，感谢！不敢相信这么便宜的面膜一点不比八块十块的差。面膜纸丝薄服帖我很喜欢，补水用也不会心疼。回回购'
    print(kw(review))