import sys
import torch
import torch.nn as nn
from time import time
import json
import random
from transformers import AutoModel, AutoTokenizer, AutoConfig
from utils import build_iterator
from sklearn import metrics
import torch.nn.functional as F
import numpy as np
from derivative_bert_models.models.base_components import PositionalEncoding

# Run on the GPU when one is available, otherwise fall back to the CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Local directory holding the pretrained (uncased) BERT checkpoint.
bert_pre_path = "./bert_uncased_model/"
# Destination path for the trained model checkpoint.
output_filep = "./output/bert.ckpt"

class DcBert(nn.Module):
    """BERT-based binary relevance scorer for (query, response) text pairs.

    ``forward`` scores batches of text pairs with a sigmoid output for
    training; ``build_index`` pre-computes embeddings for a fixed response
    pool so that ``search_all`` can rank a single query against every
    indexed response at inference time.
    """

    def __init__(self):
        super().__init__()
        # Shared encoder and tokenizer, loaded from a local pretrained checkpoint.
        self.bert = AutoModel.from_pretrained(bert_pre_path)
        self.tokenizer = AutoTokenizer.from_pretrained(bert_pre_path)
        # Scores the concatenation of the two texts' [CLS] vectors.
        self.dense = nn.Linear(2 * self.bert.config.hidden_size, 1)
        self.origin_text = []                # indexed response texts
        self.origin_text_to_embeddings = {}  # response text -> its embedding
        self.origin_text_embeddings = []     # stacked response embeddings
        # FIX: start at 0 (was 1) so the "index not built" guard in
        # search_all() actually fires before build_index() has been called.
        self.origin_text_nums = 0
        self.sigmoid = nn.Sigmoid()

    def forward(self, x1, x2):
        """Return a sigmoid match score in (0, 1) for each (x1, x2) text pair.

        Args:
            x1: query text(s) — str or list of str accepted by the tokenizer.
            x2: response text(s), same batch size as ``x1``.
        """
        _tx1 = self.tokenizer(x1, padding=True, return_tensors='pt')
        _tx2 = self.tokenizer(x2, padding=True, return_tensors='pt')
        _tx1.to(DEVICE)
        _tx2.to(DEVICE)
        _x1 = self.bert(**_tx1)[0]  # last hidden states: (batch, seq, hidden)
        _x2 = self.bert(**_tx2)[0]

        # Concatenate the two [CLS] token vectors for the pair classifier.
        x = torch.cat([_x1[:, 0, :], _x2[:, 0, :]], dim=-1)
        # FIX: F.dropout defaults to training=True, which previously kept
        # dropout active even in eval mode; respect the module's mode instead.
        x = F.dropout(x, p=0.5, training=self.training)
        out = self.dense(x)
        out = self.sigmoid(out)
        return out

    # Mode 1: no candidate responses given — match the query against the
    # full indexed response pool.
    def search_all(self, x1):
        """Score a single query ``x1`` against every indexed response.

        Returns the raw dense-layer logits, one per indexed response
        (monotonic in the sigmoid score, so the ranking is identical),
        or None when no index has been built yet.
        """
        begin_time = time()
        if self.origin_text_nums == 0:
            print("请填入回复文本，使用response。。。")
            return
        _tx2 = self.origin_text_embeddings  # pre-computed response embeddings
        _tx1 = self.tokenizer([x1], padding=True, return_tensors='pt')
        _tx1.to(DEVICE)
        with torch.no_grad():  # pure inference; skip autograd bookkeeping
            _x1 = self.bert(**_tx1)[0]
            # Broadcast the single query row against every indexed response.
            _x1 = _x1.repeat(self.origin_text_nums, 1, 1)
            # FIX: the original read `_x2` here, which is never defined in
            # this method (only `_tx2` is) — a guaranteed NameError.
            x = torch.cat([_x1[:, 0, :], _tx2[:, 0, :]], dim=-1)
            # Score each (query, response) pair with the dense head.
            out = self.dense(x)
        end_time = time()
        print("预测耗费时间：{}s".format(str(end_time - begin_time)))
        return out

    # Mode 2: the caller supplies n candidate responses.
    def search(self, x1, responses):
        """Score ``x1`` against the given candidate ``responses``.

        TODO: not implemented — currently only tokenizes the query.
        """
        _tx1 = self.tokenizer(x1, padding=True, return_tensors='pt')
        _tx1.to(DEVICE)
        pass

    def build_index(self, texts):
        """Pre-compute and cache BERT embeddings for the response pool.

        Args:
            texts: list of response strings to index for ``search_all``.
        """
        self.origin_text_nums = len(texts)
        self.origin_text = texts
        _texts = self.tokenizer(texts, padding=True, return_tensors='pt')
        _texts.to(DEVICE)
        with torch.no_grad():  # index construction needs no gradients
            self.origin_text_embeddings = self.bert(**_texts)[0]
        for text, embedding in zip(texts, self.origin_text_embeddings):
            self.origin_text_to_embeddings[text] = embedding