import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from tqdm import tqdm

from rnn.imdb import dataset
import rnn.imdb.lib as lib


class ImdbModel(nn.Module):
    """LSTM-based binary sentiment classifier for the IMDB dataset.

    Pipeline: token ids -> embedding -> (bi)LSTM -> linear head ->
    log-softmax over the two classes (pos / neg). Pairs with
    ``F.nll_loss`` in the training loop.
    """

    def __init__(self):
        super(ImdbModel, self).__init__()
        self.embedding = nn.Embedding(len(lib.ws), embedding_dim=lib.embedding_dim)
        # input_size must match embedding_dim above: the embedding output
        # feeds directly into the LSTM.
        self.lstm = nn.LSTM(input_size=lib.embedding_dim, hidden_size=lib.hidden_size,
                            num_layers=lib.num_layers, batch_first=True,
                            bidirectional=lib.bidirectional, dropout=lib.dropout)
        # Each direction contributes hidden_size features to the classifier
        # head. Deriving the factor from lib.bidirectional (instead of the
        # previous hard-coded "* 2") keeps the head consistent with the LSTM
        # when bidirectionality is turned off in the config.
        num_directions = 2 if lib.bidirectional else 1
        self.fc = nn.Linear(lib.hidden_size * num_directions, 2)  # 2 classes: pos / neg

    def forward(self, input):
        # input: [batch_size, length] of token ids
        x = self.embedding(input)  # -> [batch_size, length, embedding_dim]
        # h_n: [num_layers * num_directions, batch_size, hidden_size]
        x, (h_n, c_n) = self.lstm(x)
        if self.lstm.bidirectional:
            # Final hidden states of the last layer: forward direction at
            # h_n[-2], backward direction at h_n[-1]; concatenated they give
            # [batch_size, hidden_size * 2].
            output = torch.cat([h_n[-2, :, :], h_n[-1, :, :]], dim=-1)
        else:
            # Unidirectional: last layer's final hidden state only.
            output = h_n[-1, :, :]
        out = self.fc(output)
        # Log-probabilities; consumed by F.nll_loss during training.
        return F.log_softmax(out, dim=-1)


# Build the model/optimizer and restore previous state if checkpoints exist.
model = ImdbModel()
model.to(lib.device)
optimizer = Adam(model.parameters(), 0.001)
# map_location ensures a checkpoint saved on one device (e.g. GPU) can be
# loaded when the current lib.device differs (e.g. CPU-only machine);
# without it torch.load raises on missing CUDA.
if os.path.exists("imdb/model/model.pkl"):
    model.load_state_dict(torch.load("imdb/model/model.pkl", map_location=lib.device))
if os.path.exists("imdb/model/optimizer.pkl"):
    optimizer.load_state_dict(torch.load("imdb/model/optimizer.pkl", map_location=lib.device))


def train(epoch):
    """Train the model for ``epoch`` full passes over the training set.

    Checkpoints model and optimizer state every 100 batches. Uses
    ``F.nll_loss`` to match the model's log-softmax output.
    """
    # Ensure training mode (re-enables dropout if eval() ran earlier).
    model.train()
    for i in range(epoch):
        for idx, (input, target) in enumerate(dataset.get_data_loader(True)):
            input = input.to(lib.device)
            target = target.to(lib.device)
            optimizer.zero_grad()
            output = model(input)
            loss = F.nll_loss(output, target)
            loss.backward()
            optimizer.step()
            # BUG FIX: report the current epoch index `i`, not the total
            # epoch count `epoch` (which never changes during the run).
            print(i, idx, loss.item())
            if idx % 100 == 0:
                torch.save(model.state_dict(), "imdb/model/model.pkl")
                torch.save(optimizer.state_dict(), "imdb/model/optimizer.pkl")


# 模型评估
# Model evaluation
def eval():
    """Evaluate on the test set and print mean NLL loss and accuracy.

    NOTE(review): this shadows the builtin ``eval``; renaming would
    require updating existing callers, so the name is kept.
    """
    # FIX: switch to eval mode so the LSTM's configured dropout is
    # disabled — otherwise evaluation metrics are noisy/pessimistic.
    model.eval()
    loss_list = []
    acc_list = []
    data_loader = dataset.get_data_loader(False, lib.test_batch_size)
    for idx, (input, target) in tqdm(enumerate(data_loader), total=len(data_loader)):
        input = input.to(lib.device)
        target = target.to(lib.device)
        with torch.no_grad():
            output = model(input)
            loss = F.nll_loss(output, target)
            loss_list.append(loss.cpu().item())
            pred = output.max(dim=-1)[-1]  # argmax over the class dimension
            cur_acc = pred.eq(target).float().mean()
            acc_list.append(cur_acc.cpu().item())
    print("total loss,acc:", np.mean(loss_list), np.mean(acc_list))


if __name__ == '__main__':
    # Uncomment to run one training epoch before evaluating.
    # train(1)
    eval()
