import torch
import jieba
import os
import re
import pickle
from torch.utils.data import DataLoader,Dataset
from torch import nn,optim
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
# Prefer the second CUDA GPU when available, otherwise fall back to CPU.
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')

# Maximum number of token ids kept per review when batches are encoded.
max_len = 200
file_path = r'data/aclImdb'
# Vocabulary (word-to-index mapping) produced by an earlier preprocessing run.
# NOTE(review): unpickling assumes ./model/ws.pkl exists and is trusted input.
ws = pickle.load(open('./model/ws.pkl','rb'))
##### Part 1: Prepare the data
### 1. Define the tokenizer
def tokenizen(text):
    """Strip HTML tags and punctuation from *text*; return lowercase tokens.

    Every character in ``filters`` is replaced by a space, then the text is
    split on whitespace and lowercased.
    """
    filters = ['"', '#', '$', '%', '&', '(', ')', '*', '+', ',', '-', '.', '/',
               ':', ';', '<', '=', '>', '@', '[', '\\', ']', '^', '_', '`',
               '{', '|', '}', '~', '\t', '\n', '\x97', '\x96', '”', '“']
    # Drop HTML tags such as <br /> first.
    text = re.sub(r"<.*?>", " ", text, flags=re.S)
    # Escape each literal character before joining with '|'.  The original
    # hand-escaped list was broken: its lone '\\' element escaped the '|'
    # separator, so '\', ']' were never removed and '^' acted as a
    # zero-width anchor instead of a literal caret.
    pattern = "|".join(re.escape(ch) for ch in filters)
    text = re.sub(pattern, " ", text, flags=re.S)
    return [token.strip().lower() for token in text.split()]

### 2. Build the Dataset
class IMDBDataset(Dataset):
    """Dataset over the aclImdb review files.

    Each item is ``(tokens, label)``: the tokenized review text and a label
    of 1 for a positive review (``pos/`` folder) or 0 for a negative one
    (``neg/`` folder).
    """

    def __init__(self, train=True):
        """Collect every *.txt review path under the chosen split.

        :param train: use the train split when True, the test split otherwise.
        """
        super(IMDBDataset, self).__init__()
        self.train_data_path = r'data/aclImdb/train'
        self.test_data_path = r'data/aclImdb/test'
        data_path = self.train_data_path if train else self.test_data_path

        temp_data_path = [os.path.join(data_path, 'pos'), os.path.join(data_path, 'neg')]
        self.total_file_path = []
        for path in temp_data_path:
            file_name_list = os.listdir(path)
            file_path_list = [os.path.join(path, i) for i in file_name_list if i.endswith('.txt')]
            self.total_file_path.extend(file_path_list)

    def __getitem__(self, index):
        """Return ``(tokens, label)`` for the review at *index*."""
        filepath = self.total_file_path[index]
        # The parent directory name ('pos'/'neg') encodes the label.  The
        # original split on '\\', which only works for Windows paths; with
        # os.path.join producing '/' on Linux every label came out as 1.
        label_str = os.path.basename(os.path.dirname(filepath))
        label = 0 if label_str == 'neg' else 1
        # Close the file handle deterministically instead of leaking it.
        with open(filepath, encoding='utf-8') as f:
            content = f.read()
        tokens = tokenizen(content)
        return tokens, label

    def __len__(self):
        """Number of review files across both classes."""
        return len(self.total_file_path)


def collate_fn(batch):
    """Collate ``(tokens, label)`` pairs into a pair of LongTensors.

    Token lists are encoded to fixed-length id sequences via the module-level
    vocabulary ``ws`` before being stacked.
    """
    token_lists, labels = zip(*batch)
    encoded = [ws.transform(tokens, max_len=max_len) for tokens in token_lists]
    return torch.LongTensor(encoded), torch.LongTensor(labels)

### 3. Instantiate the dataset and wrap it in a DataLoader
def get_dataloader(train=True):
    """Return a shuffled DataLoader over the IMDB train or test split."""
    dataset = IMDBDataset(train)
    return DataLoader(dataset, batch_size=128, shuffle=True, collate_fn=collate_fn)


#### Part 2: Build the model
class IMDBModel(nn.Module):
    """Two-layer bidirectional LSTM sentiment classifier (2 classes)."""

    def __init__(self):
        super(IMDBModel, self).__init__()
        self.hidden_size = 128
        self.num_layer = 2
        # Embedding dimension 100 over the vocabulary size of ``ws``.
        self.embedding = nn.Embedding(len(ws), 100)
        self.lstm = nn.LSTM(
            100,
            hidden_size=self.hidden_size,
            num_layers=self.num_layer,
            batch_first=True,
            bidirectional=True,
            dropout=0.5,
        )
        self.fc = nn.Linear(self.hidden_size * 2, 2)

    def forward(self, input):
        """Return log-probabilities of shape [batch, 2] for id tensor *input*."""
        embedded = self.embedding(input)       # [batch, max_len, 100]
        _, (h_n, c_n) = self.lstm(embedded)    # h_n: [num_layer*2, batch, hidden_size]
        # Concatenate the final hidden states of the last layer's forward
        # (h_n[-2]) and backward (h_n[-1]) directions.
        last_forward = h_n[-2, :, :]
        last_backward = h_n[-1, :, :]
        features = torch.cat([last_forward, last_backward], dim=-1)  # [batch, 2*hidden]
        logits = self.fc(features)
        return F.log_softmax(logits, dim=-1)


#### Part 3: Model training
# Build the model/optimizer pair and resume from a checkpoint when one exists.
model = IMDBModel().to(device)
optimizer = optim.Adam(model.parameters(),lr = 0.001)

if os.path.exists('./model/model.pkl'):
    # Only the model checkpoint's existence is checked; the optimizer load
    # below assumes ./model/optimizer.pkl was saved alongside it.
    model.load_state_dict(torch.load('./model/model.pkl'))
    optimizer.load_state_dict(torch.load(('./model/optimizer.pkl')))

def train(epoch):
    """Run one training pass over the train split, checkpointing every 100 batches.

    :param epoch: epoch index (kept for the caller's bookkeeping; not used here).
    """
    # test() leaves the model in eval mode (dropout disabled); switch back to
    # training mode explicitly — the original never did, so any training after
    # an evaluation silently ran without dropout.
    model.train()
    # Build the dataloader once; the original called get_dataloader() twice,
    # constructing (and listing the directories of) a second dataset just to
    # query its length.
    data_loader = get_dataloader()
    for idx, (input, target) in tqdm(enumerate(data_loader), total=len(data_loader),
                                     ascii=True, desc='训练'):
        input = input.to(device)
        target = target.to(device)
        output = model(input)
        loss = F.nll_loss(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if idx % 100 == 0:
            print('idx:{},loss:{}'.format(idx, loss.item()))
            torch.save(model.state_dict(), './model/model.pkl')
            torch.save(optimizer.state_dict(), './model/optimizer.pkl')


#### Part 4: Model evaluation
def test():
    """Evaluate on the test split; print mean accuracy and mean NLL loss."""
    loss_list = []
    acc_list = []
    model.eval()
    test_data_loader = get_dataloader(train=False)
    for idx, (input, target) in tqdm(enumerate(test_data_loader), total=len(test_data_loader),
                                     ascii=True, desc='测试'):
        with torch.no_grad():
            # Move the batch onto the model's device; the original left it on
            # the CPU, which crashes as soon as the model lives on CUDA.
            input = input.to(device)
            target = target.to(device)
            output = model(input)
            test_loss = F.nll_loss(output, target)
            # Store plain Python floats: np.mean over a list of (possibly GPU)
            # tensors either fails or forces an implicit transfer.
            loss_list.append(test_loss.item())
            ### accuracy: argmax over classes vs. the target labels
            pred = output.max(dim=-1)[-1]
            cur_acc = pred.eq(target).float().mean()
            acc_list.append(cur_acc.item())

    print('平均准确率：{},平均损失：{}'.format(np.mean(acc_list), np.mean(loss_list)))




if __name__ == '__main__':
    # Evaluation-only run: the model is scored three times on the test split.
    # Training is intentionally left disabled here; re-enable train(epoch)
    # inside the loop to resume training.
    for epoch in range(3):
        test()