import torch.nn as nn
import torch
import time
from snownlp import SnowNLP as snow
import pandas as pd
import requests
from bs4 import BeautifulSoup
import tqdm
import torch.optim as optim
from torch.utils.data import DataLoader,Dataset
import math

# Build the initial datasets. For Chinese question classification, the later
# keyword step keeps each sentence from its first verb onward.
# NOTE(review): these reads run at import time and raise if the files are
# missing — consider moving them under the __main__ guard.
dev_file='../dev.txt'
dev_data=pd.read_table(dev_file,header=None)
train_file = pd.read_table('train.txt', header=None)
# train2_file='translate.txt'
train_file_2=pd.read_table('train_2.txt',header=None)
train_data = train_file[0]      # column 0: question text
train_data_2=train_file_2[0]    # back-translated augmentation text
key_word_all = []               # filled by keyword(): verb-onward word lists
label = train_file[1]           # column 1: integer class label
label_2=train_file_2[1]


#通过爬取网易翻译爬虫进行中英互译
def Translate(content,type):
    """Translate *content* via the Youdao mobile web endpoint (scraping).

    Args:
        content: text to translate.
        type: Youdao direction code, e.g. "ZH_CN2EN", "ZH_CN2JA" or "AUTO".
            (NOTE(review): shadows the builtin `type`; kept for caller
            compatibility.)

    Returns:
        The translated text, or None when no result could be extracted
        (request rejected, markup changed, empty result).
    """
    url = "https://m.youdao.com/translate"
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36'
    }

    # Form fields mimic the page's translate form submission.
    data = {
        'inputtext': content,
        'type': type
    }

    # timeout keeps a hung connection from stalling the whole crawl
    res = requests.post(url=url, data=data, headers=header, timeout=10)

    # Parse the returned HTML page.
    soup = BeautifulSoup(res.text, 'html.parser')

    # The result is rendered as <ul id="translateResult"><li>...</li></ul>
    translate_ul = soup.select_one('ul#translateResult')

    if translate_ul:
        translate_li = translate_ul.find('li')
        # Guard: the original crashed with AttributeError when the <ul>
        # existed but contained no <li>.
        if translate_li:
            return translate_li.get_text()
    return None

def Do_translate(num,label_num,way1,way2):
    """Augment the training set by translating selected samples two ways.

    Args:
        num: class label written alongside each translated sentence.
        label_num: indices into the module-level ``train_data`` to translate.
        way1, way2: Youdao direction codes (e.g. "ZH_CN2JA", "ZH_CN2EN").

    Appends "text<TAB>label" lines to translate.txt, two per sample.
    """
    # Open once instead of re-opening the file for every sample.
    with open("translate.txt", 'a', encoding='utf-8') as file:
        for i in tqdm.tqdm(label_num):
            translate = train_data[i]
            time.sleep(0.5)  # throttle so the endpoint doesn't block us
            a = Translate(translate, way1)
            time.sleep(0.5)
            b = Translate(translate, way2)
            # Translate() returns None on failure; skip that line instead of
            # crashing on None + str concatenation.
            if a is not None:
                file.write(a + '\t' + str(num) + "\n")
            if b is not None:
                file.write(b + '\t' + str(num) + "\n")

def back_translate(data):
    """Translate every sentence of an augmentation file back into Chinese.

    Args:
        data: path to a tab-separated file — text in column 0, label in
            column 1 (e.g. the output of Do_translate).

    Appends "text<TAB>label" lines to train_2.txt.
    """
    # Use local names; the original rebound the module-level `train_data`.
    table = pd.read_table(data, header=None)
    sentences = table[0]
    labels = table[1]
    # Open once instead of re-opening the file for every sentence.
    with open("train_2.txt", 'a', encoding='utf-8') as file:
        for sentence, lab in zip(tqdm.tqdm(sentences), labels):
            back = Translate(sentence, "AUTO")
            # Translate() may return None on failure; skip instead of crashing.
            if back is not None:
                file.write(back + '\t' + str(lab) + "\n")





#利用snow库将句子中动词后的句子切割出来
def keyword(x):
    """For each sentence in *x*, keep the words from the first verb onward.

    Uses SnowNLP POS tagging; if no verb (tag 'v') is found, the index
    defaults to 0 and the whole sentence is kept. Results are appended to
    the module-level ``key_word_all`` list (one word list per sentence).
    """
    for sentence in tqdm.tqdm(x):
        tagged = list(snow(sentence).tags)  # [(word, pos), ...]
        # Index of the first verb; fall back to 0 (keep everything).
        first_verb = next(
            (idx for idx, (word, pos) in enumerate(tagged) if pos == 'v'), 0
        )
        key_word_all.append([word for word, pos in tagged[first_verb:]])



#构建词典
# Vocabulary lookup tables. Index 0 is reserved for the padding token.
word2index={'pad':0}
# BUG FIX: the reverse table is keyed by index everywhere else
# (dict() fills it as index2word[num+1] = word), so the padding entry
# must be {0: 'pad'}, not {'pad': 0}.
index2word={0:'pad'}
dict_list=[]  # insertion-ordered list of unique vocabulary words
def dict(x):
    """Build word2index/index2word from the word sequences in *x*.

    Punctuation and high-frequency domain words are dropped. Unique words
    are collected into the module-level ``dict_list`` in first-seen order,
    then numbered from 1 (index 0 stays reserved for padding).

    NOTE(review): shadows the builtin ``dict``; rename when callers can change.
    """
    drop = {'?', '.', '，', '。', '糖尿病', '血糖', '胰岛素', '糖'}
    # O(1) membership via a set instead of scanning dict_list per word.
    seen = set(dict_list)
    for sen in x:
        for j in sen:
            if j not in drop and j not in seen:
                seen.add(j)
                dict_list.append(j)
    for num, data in enumerate(tqdm.tqdm(dict_list)):
        word2index[data] = num + 1
        index2word[num + 1] = data

#找出最长句子以便padding
def max_len(x):
    len_sen = 0
    for i in x:
        if len(i) > len_sen:
            len_sen = len(i)
    print(len_sen)

def Token(x):
    """Map the word sequences in *x* to index sequences via ``word2index``.

    Punctuation and the high-frequency domain words are filtered out, and
    out-of-vocabulary words are silently dropped.

    Returns:
        A list of lists of int token ids, one inner list per sentence.
    """
    # Set membership replaces the original chained != comparisons.
    drop = {'?', '.', '，', '。', '糖尿病', '血糖', '胰岛素', '糖'}
    token = []
    for sen in x:
        token.append([word2index[j] for j in sen
                      if j not in drop and j in word2index])
    return token

def padding(data, max_len=20):
    """Right-pad each sequence in *data* with 0 (the 'pad' index), in place.

    Args:
        data: list of lists of token ids; modified in place.
        max_len: target length. The default (20) matches the original
            hard-coded model input length, so existing callers are unchanged.

    Returns:
        The same *data* list. Sequences already at or beyond *max_len* are
        left untouched (no truncation, as in the original).
    """
    for seq in data:
        if len(seq) < max_len:
            seq.extend([0] * (max_len - len(seq)))
    return data



# 定义位置编码
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017).

    Precomputes a (max_len, 1, emb_dim) table; ``forward`` adds the first
    seq_len rows to an input of shape (seq_len, batch, emb_dim) and applies
    dropout to the sum.
    """

    def __init__(self, emb_dim, dropout, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        positions = torch.arange(0, max_len).unsqueeze(1)
        # Frequency of each even dimension i: 1 / 10000^(i / emb_dim).
        freqs = torch.exp(torch.arange(0, emb_dim, 2) * -(math.log(10000.0) / emb_dim))
        angles = positions * freqs
        pe = torch.zeros(max_len, 1, emb_dim)
        pe[:, 0, 0::2] = torch.sin(angles)
        pe[:, 0, 1::2] = torch.cos(angles)
        # Buffer, not a parameter: follows .to(device), excluded from grads.
        self.register_buffer('pe', pe)

    def forward(self, x):
        # Broadcast the position table over the batch dimension.
        return self.dropout(x + self.pe[:x.size(0), :])

class TransformEncoder(nn.Module):
    """Transformer-encoder classifier over token-id sequences.

    Input: (batch, seq_len) int64 token ids.
    Output: (batch, seq_len, output_dim) per-position logits.
    """

    def __init__(self, embedding_dim, dropout, output_dim, n_heads, hid_dim,
                 n_layers, vocab_size=4094):
        """vocab_size generalizes the original hard-coded embedding size;
        the default (4094) keeps existing callers byte-compatible
        (~4093 vocabulary words + padding index 0)."""
        super(TransformEncoder, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.pos_encoding = PositionalEncoding(embedding_dim, dropout)
        self.transformer_encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(embedding_dim, n_heads, hid_dim,
                                       dropout, batch_first=True),
            n_layers
        )
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(embedding_dim, output_dim)

    def forward(self, src):
        embedded = self.embedding(src)  # (batch, seq, emb)
        # PositionalEncoding expects (seq, batch, emb): transpose in and out.
        embedded = self.pos_encoding(embedded.transpose(0, 1)).transpose(0, 1)
        outputs = self.transformer_encoder(embedded)
        return self.fc(self.dropout(outputs))


# ----- Hyperparameters and training objects (built at import time) -----
EMB_DIM = 1000          # embedding size; must be divisible by N_HEADS (1000/200 = 5)
HID_DIM = 256           # feed-forward hidden size inside each encoder layer
N_LAYERS = 6            # number of stacked encoder layers
N_HEADS = 200           # attention heads per layer
DROPOUT = 0.2
LEARNING_RATE = 0.00001
OUTPUT_DIM = 800        # per-position logits; training reshapes output to (200, -1)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = TransformEncoder(EMB_DIM, DROPOUT, OUTPUT_DIM, N_HEADS, HID_DIM, N_LAYERS)
model = model.to(device)
optimizer = optim.Adam(model.parameters(),lr=LEARNING_RATE)
# ignore_index=0 skips the padding class/index when computing the loss
criterion = nn.CrossEntropyLoss(ignore_index=0)
criterion=criterion.to(device)



if __name__ == '__main__':

    # train_label_counter = Counter(train_data[1])
    # print(train_label_counter) #Counter({3: 2723, 1: 2292, 2: 1707, 4: 897, 0: 747, 5: 634})  # imbalanced label distribution

    # The label distribution is imbalanced, so back-translation
    # (zh -> ja/en -> zh) is used to augment the training data.
    # Classes 0, 4 and 5 are under-represented; each gets 2x back-translated copies.
    # hui_yi_label_0=[]
    # hui_yi_label_4=[]
    # hui_yi_label_5=[]
    # for num,x in enumerate(label):
    #     if x==0:
    #         hui_yi_label_0.append(num)
    #     if x==4:
    #         hui_yi_label_4.append(num)
    #     if x==5:
    #         hui_yi_label_5.append(num)
    # Do_translate(0,hui_yi_label_0,"ZH_CN2JA","ZH_CN2EN") # first zh->ja and zh->en
    # Do_translate(4, hui_yi_label_4, "ZH_CN2JA", "ZH_CN2EN")
    # Do_translate(5, hui_yi_label_5, "ZH_CN2JA", "ZH_CN2EN")
    # Then translate the augmented sentences back into Chinese:
    # back_translate(train2_file)

    # Keep only the words from the first verb onward in every sentence
    keyword(train_data)
    keyword(train_data_2)
    # merged_list = [item for sublist in key_word_all for item in sublist]
    # Count word frequencies to find over-frequent domain nouns to drop:
    # repeat_word=Counter(merged_list)
    # print(repeat_word)   #糖尿病，胰岛素，糖，血糖，?

    # Build the vocabulary from the extracted keyword lists
    dict(key_word_all)
    # print(word2index) #len=4093
    # Tokenize, then (optionally check max length and) pad to fixed length
    token=Token(key_word_all)
    # max_len(token)
    token=padding(token)


    # print(len(token)) #13022
    label_all = list(label) + list(label_2) #len=13022
    # print(len(label_all))

    # Training dataset: (token ids, label) pairs as tensors
    class DATASET(Dataset):
        def __init__(self, data, label):
            self.data = torch.tensor(data)
            self.label = torch.tensor(label)

        def __len__(self):
            return len(self.data)

        def __getitem__(self, idx):
            return self.data[idx], self.label[idx]


    dataset = DATASET(token,label_all)
    dataloader = DataLoader(dataset, batch_size=200, shuffle=True,drop_last=True)  # drop_last discards the 22 samples past the last full batch

    # ap = 0
    # for i in dataloader:
    #     p,o=i
    #     print(p)
    #     print(o)
    #     ap+=1
    #     if ap ==2:
    #         break

    # for epoch in range(1000):  # 一般训练几个epoch，即遍历几次数据集
    #     running_loss = 0.0
    #     for i in dataloader:
    #         # 清零参数梯度
    #         optimizer.zero_grad()
    #         src, tgt = i
    #         src = src.to(device)
    #         tgt = tgt.to(device)
    #         # 前向传播，后向传播，优化步骤
    #         output = model(src)
    #         loss = criterion(output.view(200, -1), tgt) # 计算损失  unsqueeze是扩张，squeeze是压缩
    #         loss.backward()# 后向传播，计算梯度
    #         optimizer.step()  # 更新权重，进行优化步骤
    #         running_loss += loss.item()  # 计算总损失
    #
    #     print(f'Epoch {epoch + 1}, Loss: {running_loss / (len(label_all)-22)}')  # 打印每个epoch的损失
    #     if ((epoch + 1) % 1000 == 0):
    #         torch.save(model.state_dict(), '1000.pth')

    # ----- Evaluation on the dev split -----
    dev_word_all = []

    Dev_Data=dev_data[0]
    Dev_label=dev_data[1]
    Dev_label=list(Dev_label)

    def keyword2(x):
        '''Extract all words from the first verb (POS tag 'v') onward.

        Duplicate of keyword() above, but appends to dev_word_all instead
        of key_word_all.'''
        for i in tqdm.tqdm(x):
            i = snow(i)
            tag = i.tags
            tags = [i for i in tag]
            # index of the first (word, tag) tuple whose tag is 'v';
            # defaults to 0, i.e. keep the whole sentence if no verb is found
            index_v = 0
            for index, (word, tag) in enumerate(tags):
                if tag == 'v':
                    index_v = index
                    break
            # keep everything from the first verb onward
            v_and_after_tuples = tags[index_v:]
            v_after = [i for i, v in v_and_after_tuples]
            dev_word_all.append(v_after)
    keyword2(Dev_Data)
    dev_token=Token(dev_word_all)
    data=padding(dev_token)


    # Unlabeled dataset used for inference over the dev split
    class Dev_data_set(Dataset):
        def __init__(self, data):
            self.data = torch.tensor(data)

        def __len__(self):
            return len(self.data)

        def __getitem__(self, idx):
            return self.data[idx]


    dataset = Dev_data_set(data)
    dataloader_2 = DataLoader(dataset, batch_size=200)
    # Load the checkpoint saved by the (commented-out) training loop above
    model.load_state_dict(torch.load('1000.pth'))
    model.eval()
    with torch.no_grad():
        predict_label=[]
        for i in dataloader_2:
            i=i.to(device)
            output = model(i)
            # NOTE(review): view(200, -1) assumes every dev batch has exactly
            # 200 rows (no drop_last here) — confirm dev size is a multiple of 200
            output = output.view(200, -1)
            predicted_index = torch.argmax(output, dim=1)
            predicted_index=predicted_index.cpu()
            predicted_index=predicted_index.numpy()
            predict_label.append(predicted_index.tolist())  # per-batch predicted classes

        # Flatten per-batch predictions into one flat list
        predict_label = [item for sublist in predict_label for item in sublist]
        predict_True=[]
        # print(predict_True_label)
        for pre,true in zip(predict_label,Dev_label):
            if pre == true:
                predict_True.append(pre)
        # NOTE(review): hard-coded 2000 assumes the dev-set size — confirm
        print("准确率："+str(len(predict_True)/2000))








