# encoding: utf-8

import sys
import argparse
import os
import time
import datetime
from tqdm import tqdm
import torch
from transformers import BertTokenizer, BertModel, BertConfig
import numpy as np
from torch.utils import data
from sklearn.model_selection import train_test_split


# dataloader
class DataGen(data.Dataset):
    """Map-style dataset yielding (token_ids, c1_label, c4_label) triples.

    Each of the three parallel sequences is indexed together and converted
    to a numpy array so the default DataLoader collate can batch them.
    """

    def __init__(self, data, label1, label2):
        self.data = data
        self.label1 = label1
        self.label2 = label2

    def __len__(self):
        # Dataset size is driven by the token-id sequence list.
        return len(self.data)

    def __getitem__(self, index):
        tokens = np.array(self.data[index])
        first_label = np.array(self.label1[index])
        second_label = np.array(self.label2[index])
        return tokens, first_label, second_label


class Model(torch.nn.Module):
    """BERT encoder with two classification heads sharing one hidden layer.

    Head 1 (c1): shared linear -> ReLU -> dropout -> linear to num1_class.
    Head 2 (c4): shared linear -> 3-layer SELU MLP tapering from
    4*num2_class down to num2_class.

    Attribute names (n1fc1, n2fc2, ...) are part of the checkpoint format
    and must not be renamed.
    """

    def __init__(self, bert_model, bert_config, num1_class: int, num2_class: int = 1000):
        super(Model, self).__init__()
        # Head-2 MLP widths: 4x the class count, halved at each layer.
        wide = num2_class * 4
        mid = wide // 2
        narrow = mid // 2
        self.bert_model = bert_model
        self.dropout = torch.nn.Dropout(0.4)
        self.n1fc1 = torch.nn.Linear(bert_config.hidden_size, bert_config.hidden_size)
        self.n1fc2 = torch.nn.Linear(bert_config.hidden_size, num1_class)
        self.n2fc2 = torch.nn.Linear(bert_config.hidden_size, wide)
        self.n2fc3 = torch.nn.Linear(wide, mid)
        self.n2fc4 = torch.nn.Linear(mid, narrow)
        self.n2fc5 = torch.nn.Linear(narrow, num2_class)
        self.relu = torch.nn.ReLU()
        self.selu = torch.nn.SELU()

    def forward(self, token_ids):
        # Pooled output (index 1 of the encoder's return tuple).
        pooled = self.dropout(self.bert_model(token_ids)[1])
        shared = self.n1fc1(pooled)
        # Head 1: ReLU + dropout on the shared features, then project.
        head1 = self.n1fc2(self.dropout(self.relu(shared)))
        # Head 2: tapering SELU MLP over the same shared features.
        h = self.dropout(self.selu(self.n2fc2(shared)))
        h = self.dropout(self.selu(self.n2fc3(h)))
        h = self.selu(self.n2fc4(h))
        head2 = self.n2fc5(h)
        return head1, head2


class ModelSmall(torch.nn.Module):
    """Lighter variant of Model: head 2 is a single linear projection.

    Head 1 (c1) matches Model exactly; head 2 (c4) maps the shared hidden
    layer straight to num2_class logits. Attribute names are part of the
    checkpoint format and must not be renamed (selu is kept although the
    forward pass never applies it).
    """

    def __init__(self, bert_model, bert_config, num1_class: int, num2_class: int = 1000):
        super(ModelSmall, self).__init__()

        self.bert_model = bert_model
        self.dropout = torch.nn.Dropout(0.4)
        self.n1fc1 = torch.nn.Linear(bert_config.hidden_size, bert_config.hidden_size)
        self.n1fc2 = torch.nn.Linear(bert_config.hidden_size, num1_class)
        self.n2fc2 = torch.nn.Linear(bert_config.hidden_size, num2_class)

        self.relu = torch.nn.ReLU()
        self.selu = torch.nn.SELU()

    def forward(self, token_ids):
        # Pooled output (index 1 of the encoder's return tuple).
        pooled = self.dropout(self.bert_model(token_ids)[1])
        shared = self.n1fc1(pooled)

        # Head 1: ReLU + dropout on the shared features, then project.
        head1 = self.n1fc2(self.dropout(self.relu(shared)))
        # Head 2: direct projection of the (pre-activation) shared features.
        head2 = self.n2fc2(shared)
        return head1, head2


def do_train(args):
    """Train a dual-head (c1/c4) BERT classifier from a text file.

    Reads ./datas/{args.name}.txt where each line is "text__c1__c4",
    builds the two label vocabularies on the fly, tokenizes the texts,
    trains for args.epoch epochs, and writes per-epoch checkpoints and
    label-vocabulary files under ./{args.name}/.

    Fixes vs. previous revision: the training DataLoader now shuffles
    per epoch; device handling is a consistent torch.device.
    """
    print("开始读取数据")
    with open(f"./datas/{args.name}.txt", "r", encoding="utf-8") as f:
        datas = f.readlines()

    # Optional truncation for smoke tests / batch-size probing.
    if args.data_num:
        datas = datas[:args.data_num]

    print("完成读取数据,数据长度为：", len(datas))

    datas = [x.strip() for x in datas]
    c1_labels = []
    c4_labels = []

    def get_label(c1, c4):
        # String inputs: intern into the vocabularies (growing them on
        # first sight) and return their indices. Int inputs: decode
        # indices back to label strings.
        if isinstance(c1, str):
            if c1 not in c1_labels:
                c1_labels.append(c1)
            if c4 not in c4_labels:
                c4_labels.append(c4)
            return c1_labels.index(c1), c4_labels.index(c4)
        else:
            return c1_labels[c1], c4_labels[c4]

    # Task name doubles as the output directory name.
    data_type = args.name

    if not os.path.exists(f'./{data_type}'):
        os.mkdir(f'./{data_type}')
    print("开始加载预训练模型")
    # Load the pretrained encoder: hub name on Windows, local path elsewhere.
    if sys.platform.startswith('win'):
        pretrained = 'hfl/chinese-roberta-wwm-ext'
    else:
        pretrained = '/root/.../pretrained_models/hfl-chinese-roberta-wwm-ext'

    tokenizer = BertTokenizer.from_pretrained(pretrained)
    bert_model = BertModel.from_pretrained(pretrained)
    config = BertConfig.from_pretrained(pretrained)
    print("完成加载预训练模型")

    def get_train_test_data(max_length=args.max_length, test_size=0.2):
        # Tokenize every "text__c1__c4" line and split into train/test sets.
        texts = []
        labels = []

        for one in tqdm(datas):
            result = one.split("__")
            if len(result) != 3:
                continue  # skip malformed lines

            text, c1, c4 = result
            try:
                label_index = get_label(c1.strip(), c4.strip())
                text = tokenizer.encode(text.strip(), max_length=max_length, padding="max_length",
                                        truncation="longest_first")
                texts.append(text)
                labels.append(label_index)
            except Exception as e:
                # Best-effort ingestion: log and drop lines that fail to
                # label or tokenize rather than aborting the whole run.
                print(e)
                continue
        X_train, X_test, y_train, y_test = train_test_split(texts, labels, test_size=test_size, random_state=0,
                                                            shuffle=True)
        return (X_train, y_train), (X_test, y_test)

    print("开始转换数据")
    (X_train, y_train), (X_test, y_test) = get_train_test_data(test_size=args.test_size)
    print("完成转换数据")
    # Persist label vocabularies so indices can be decoded at inference time.
    with open(f"./{data_type}/{args.name}_c1.txt", "w", encoding="utf-8") as f:
        for c1 in c1_labels:
            f.write(f"{c1}\n")
    with open(f"./{data_type}/{args.name}_c4.txt", "w", encoding="utf-8") as f:
        for c4 in c4_labels:
            f.write(f"{c4}\n")

    # Unzip the (c1, c4) label pairs into parallel lists for the datasets.
    c1_train_shuffle_label = []
    c4_train_shuffle_label = []
    for c1, c4 in y_train:
        c1_train_shuffle_label.append(c1)
        c4_train_shuffle_label.append(c4)

    c1_test_shuffle_label = []
    c4_test_shuffle_label = []
    for c1, c4 in y_test:
        c1_test_shuffle_label.append(c1)
        c4_test_shuffle_label.append(c4)

    batch_size = args.batch_size
    train_dataset = DataGen(X_train, c1_train_shuffle_label, c4_train_shuffle_label)
    test_dataset = DataGen(X_test, c1_test_shuffle_label, c4_test_shuffle_label)
    # Reshuffle training batches each epoch; keep test order fixed.
    train_dataloader = data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_dataloader = data.DataLoader(test_dataset, batch_size=batch_size)
    # Select full or small model architecture (use_small: 1 = small, 0 = full).
    if args.use_small == 0:
        model = Model(bert_model, config, len(c1_labels), len(c4_labels))
    else:
        model = ModelSmall(bert_model, config, len(c1_labels), len(c4_labels))
    if args.pretrained:
        # Resume from a previously saved state dict (loaded onto CPU first).
        print("加载权重：", args.pretrained)
        model.load_state_dict(
            torch.load(os.path.join("roberta", args.pretrained), map_location=lambda storage, loc: storage)
        )
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, weight_decay=1e-4)

    for epoch in range(args.epoch):
        print(f"epoch = {epoch}, datetime = {datetime.datetime.now()}")
        loss_sum = 0.0
        accu = 0
        accu1 = 0
        accu2 = 0
        model.train()
        for token_ids, c1, c4 in tqdm(train_dataloader):
            token_ids = token_ids.to(device).long()
            c1_label = c1.to(device).long()
            c4_label = c4.to(device).long()
            out = model(token_ids)
            # Total loss is the unweighted sum of the two heads' losses.
            loss1 = criterion(out[0], c1_label)
            loss4 = criterion(out[1], c4_label)
            loss = loss1 + loss4
            optimizer.zero_grad()
            loss.backward()  # backpropagation
            optimizer.step()  # parameter update
            loss_sum += loss.cpu().data.numpy()
            accu1 += (out[0].argmax(1) == c1_label).sum().cpu().data.numpy()
            accu2 += (out[1].argmax(1) == c4_label).sum().cpu().data.numpy()
            # Mean of the two heads' correct counts (turned into accuracy
            # when divided by the dataset size in the epoch report below).
            accu = (accu1 + accu2) / 2

        test_loss_sum = 0.0
        test_accu = 0
        test_accu1 = 0
        test_accu2 = 0
        model.eval()
        for token_ids, c1, c4 in tqdm(test_dataloader):
            token_ids = token_ids.to(device).long()
            c1_label = c1.to(device).long()
            c4_label = c4.to(device).long()
            with torch.no_grad():
                out = model(token_ids)
                loss1 = criterion(out[0], c1_label)
                loss4 = criterion(out[1], c4_label)
                loss = loss1 + loss4
                test_loss_sum += loss.cpu().data.numpy()
                test_accu1 += (out[0].argmax(1) == c1_label).sum().cpu().data.numpy()
                test_accu2 += (out[1].argmax(1) == c4_label).sum().cpu().data.numpy()
                test_accu = (test_accu1 + test_accu2) / 2

        # NOTE(review): losses are normalized by dataset size (not batch
        # count), matching the original reporting convention.
        print(
            f"epoch:{epoch}, train acc:{accu / len(train_dataset)} train loss:{loss_sum / len(train_dataset)}, test loss:{test_loss_sum / len(test_dataset)}")
        print(f"test c1 acc:{test_accu1 / len(test_dataset)}, test c2 acc:{test_accu2 / len(test_dataset)}")

        # Checkpoint every epoch; the filename embeds the mean test accuracy.
        torch.save(model.state_dict(), f'./{data_type}/{data_type}_model_{epoch}_{test_accu / len(test_dataset)}.pt')


def setup_args(argv=None):
    """Build and parse the command-line arguments for training.

    Args:
        argv: Optional explicit argument list (for tests/programmatic use);
            when None, argparse falls back to sys.argv[1:].

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name", default="staplesc4c6", type=str, help="name of the data")
    parser.add_argument("-p", "--pretrained", type=str, help="以前训练过的模型权重，加载继续训练")
    parser.add_argument("-e", "--epoch", default=25, type=int, help="训练迭代次数")
    parser.add_argument("-b", "--batch_size", default=384, type=int, help="训练批次大小")
    parser.add_argument("-t", "--test_size", default=0.05, type=float, help="测试集比例")
    parser.add_argument("-dn", "--data_num", type=int, help="训练的数据数量，可以用来测试batch size")
    parser.add_argument("-s", "--use_small", type=int, default=1, help="是否使用小模型来训练，1使用，0不使用，默认使用小模型", choices=[0, 1])
    # Help text previously copy-pasted from --data_num; corrected.
    parser.add_argument("-ml", "--max_length", default=64, type=int, help="文本编码的最大token长度，超出部分截断")
    args = parser.parse_args(argv)
    return args


if __name__ == '__main__':
    # Script entry point: parse CLI options, then run the training loop.
    cli_args = setup_args()
    do_train(cli_args)
