# encoding: utf-8

import torch
from torch import nn
from torch.utils.data import DataLoader
import numpy as np

from transformers import BertTokenizer

from utils.data import load_txt, split_text
from utils.models import MyDataSet


def get_label(x):
    """Two-way label mapper backed by the module-level ``labels`` list.

    Given a string, register it in ``labels`` if unseen and return its
    integer index; given an integer index, return the label string.
    """
    if not isinstance(x, str):
        # Reverse lookup: integer index -> label string.
        return labels[x]
    if x not in labels:
        # First time we see this label: append, so its index is stable.
        labels.append(x)
    return labels.index(x)


def get_train_dataset(file_name: str):
    """Build a ``MyDataSet`` of (token-id sequence, integer label) pairs.

    Args:
        file_name: path of the training text file read by ``load_txt``.

    Returns:
        MyDataSet with ``data`` = fixed-length token-id lists and
        ``label`` = integer label ids produced by ``get_label``.
    """
    lines = load_txt(file_name)
    texts, str_labels = split_text(lines)
    encodings = [
        tokenizer.encode(text, max_length=max_length, padding="max_length",
                         truncation="longest_first")
        for text in texts
    ]
    # The original zipped encodings with labels but never used the token
    # element; the label ids depend only on the label strings.
    int_labels = [get_label(str_lab) for str_lab in str_labels]
    return MyDataSet(data=encodings, label=int_labels)


class Conv1DNet(nn.Module):
    """1-D CNN text classifier over embedded token ids.

    The call site passes ``(vocab_size, embedding_dim, num_class, ...)``,
    so ``in_channels`` is the embedding vocabulary size and
    ``out_channels`` the embedding dimension.

    Fixes vs. the original:
      * ``nn.Linear()`` was called with no arguments (TypeError);
      * the conv used an undefined global ``padding`` (NameError);
      * ``forward`` never applied the embedding, so permuting the 2-D
        token-id tensor would crash;
      * the constructor rejected the ``batch_size=`` keyword used by the
        caller and read module globals instead of its own parameters.
    """

    def __init__(self, in_channels, out_channels, num_classes, kernel_size=3,
                 batch_size=16, padding=1, seq_len=64):
        """
        Args:
            in_channels: vocabulary size for the embedding table.
            out_channels: embedding dimension (= Conv1d input channels).
            num_classes: number of target classes (= Conv1d output channels).
            kernel_size: 1-D convolution window size.
            batch_size: accepted for caller compatibility; unused in forward.
            padding: zero padding applied by the convolution.
            seq_len: fixed token-sequence length (must match the tokenizer's
                ``max_length``) — needed to size the final linear layer.
        """
        super(Conv1DNet, self).__init__()
        self.batch_size = batch_size
        self.embedding_dim = out_channels
        self.embedding = nn.Embedding(in_channels, out_channels)
        self.conv = nn.Conv1d(in_channels=out_channels, out_channels=num_classes,
                              kernel_size=kernel_size, padding=padding)
        self.max_pool = nn.MaxPool1d(kernel_size=2)
        # Conv output length for stride 1, then halved (floor) by the pool.
        conv_len = seq_len + 2 * padding - kernel_size + 1
        self.fc = nn.Linear(num_classes * (conv_len // 2), num_classes)
        self.relu = nn.ReLU()

    def forward(self, x: torch.Tensor):
        """Map (batch, seq_len) integer token ids to (batch, num_classes) logits."""
        emb = self.embedding(x)        # (batch, seq_len, embedding_dim)
        h = emb.permute(0, 2, 1)       # (batch, embedding_dim, seq_len) for Conv1d
        h = self.relu(self.conv(h))    # (batch, num_classes, conv_len)
        h = self.max_pool(h)           # (batch, num_classes, conv_len // 2)
        h = h.reshape(h.size(0), -1)   # flatten per example for the classifier head
        return self.fc(h)


# https://blog.csdn.net/sunny_xsc1994/article/details/82969867
# (original note: "couldn't figure this out")

# Global label registry, filled lazily by get_label() while the dataset is built.
labels = []

pretrained_model = "G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext"
# pretrained_model = "D:/codes/nlp_about/pretrained_model/hfl_chinese-roberta-wwm-ext"
tokenizer = BertTokenizer.from_pretrained(pretrained_model)

train_file = "../../datas/train.txt"
batch_size = 16
max_length = 64  # every example is padded/truncated to this many tokens
vocab_size = tokenizer.vocab_size
embedding_dim = 100

train_datasets = get_train_dataset(train_file)

train_dataloader = DataLoader(train_datasets, batch_size=batch_size)
# NOTE: reusing the training data as the "test" split for convenience (original comment).
test_dataloader = DataLoader(train_datasets, batch_size=batch_size)

num_class = len(labels)

# Bug fix: Conv1DNet's constructor takes (in_channels, out_channels, num_classes,
# kernel_size) and has no required batch_size parameter — the original call passed
# batch_size=batch_size, which raises TypeError. Pass the kernel size instead.
model = Conv1DNet(vocab_size, embedding_dim, num_class, kernel_size=3)
print(model)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device=device)


def train_model(_model, traindataloder, valdataloader, criterion, optimizer, num_epochs=20):
    """Train ``_model`` for ``num_epochs`` epochs, validating after each one.

    Args:
        _model: network to optimize (already moved to its target device).
        traindataloder: DataLoader yielding (inputs, targets) training batches.
        valdataloader: DataLoader yielding (inputs, targets) validation batches.
        criterion: loss function, e.g. ``nn.CrossEntropyLoss()``.
        optimizer: optimizer built over ``_model.parameters()``.
        num_epochs: number of full passes over the training data.

    Returns:
        The trained model (the same object as ``_model``).
    """
    # Derive the device from the model itself instead of relying on a global.
    device = next(_model.parameters()).device
    # Per-epoch histories of loss and accuracy.
    train_loss_all = []
    train_acc_all = []
    val_loss_all = []
    val_acc_all = []
    for epoch in range(num_epochs):
        print("-" * 10)
        print("Epoch{}/{}".format(epoch, num_epochs - 1))
        train_loss = 0.0
        train_corrects = 0
        train_num = 0
        val_loss = 0.0
        val_corrects = 0
        val_num = 0
        # 1. Training phase: accumulate loss and accuracy over the train set.
        _model.train()
        for step, batch in enumerate(traindataloder):
            # Bug fix: targets must live on the same device as the inputs/model.
            textdata, target = batch[0].to(device), batch[1].to(device).long()
            out = _model(textdata)
            # Bug fix: pre_lab was referenced below but never computed here
            # (NameError on the first training batch in the original).
            pre_lab = torch.argmax(out, 1)
            loss = criterion(out, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Weight by batch size so partial final batches average correctly.
            train_loss += loss.item() * len(target)
            train_corrects += torch.sum(pre_lab == target.data)
            train_num += len(target)
        # Epoch-level training loss and accuracy.
        train_loss_all.append(train_loss / train_num)
        train_acc_all.append(train_corrects.double().item() / train_num)
        print("{} Train Loss:{:.4f} Train Acc:{:.4f}".format(epoch, train_loss_all[-1], train_acc_all[-1]))
        # 2. Validation phase: no gradients needed.
        _model.eval()
        with torch.no_grad():
            for step, batch in enumerate(valdataloader):
                textdata, target = batch[0].to(device), batch[1].to(device).long()
                out = _model(textdata)
                pre_lab = torch.argmax(out, 1)  # predicted labels
                loss = criterion(out, target)
                val_loss += loss.item() * len(target)
                val_corrects += torch.sum(pre_lab == target.data)
                val_num += len(target)
        # Epoch-level validation loss and accuracy.
        val_loss_all.append(val_loss / val_num)
        val_acc_all.append(val_corrects.double().item() / val_num)
        print("{} Val Loss:{:.4f} Val Acc:{:.4f}".format(epoch, val_loss_all[-1], val_acc_all[-1]))

    # Dump the full training/validation history.
    print({"epoch": range(num_epochs),
           "train_loss_all": train_loss_all,
           "train_acc_all": train_acc_all,
           "val_loss_all": val_loss_all,
           "val_acc_all": val_acc_all})
    # Bug fix: return the model we trained, not the module-level global ``model``.
    return _model


optimizer = torch.optim.Adam(model.parameters(), lr=0.003)
loss_func = nn.CrossEntropyLoss()  # cross-entropy loss for multi-class classification

# Train the model on all data for 20 epochs (validation reuses the same data).
model = train_model(model, train_dataloader, test_dataloader, loss_func, optimizer, num_epochs=20)
