# encoding: utf-8

import sys
import os
import datetime
import argparse
import pickle
from tqdm import tqdm
import torch
from torch.utils import data
from transformers import BertModel, BertConfig

from models.model import DataGen, ModelExpert, ModelMoe


print("开始加载预训练模型")
# Select the local checkpoint directory for the pretrained encoder based on
# the host OS (Windows dev box vs. Linux training server).
on_windows = sys.platform.startswith('win')
pretrained = (
    'D:/codes/nlp_about/pretrained_model/hfl_chinese-roberta-wwm-ext'
    if on_windows
    else '/root/.../pretrained_models/hfl-chinese-roberta-wwm-ext'
)

# Shared RoBERTa-wwm-ext backbone and its config; both are reused by every
# expert model below.
bert_model = BertModel.from_pretrained(pretrained)
config = BertConfig.from_pretrained(pretrained)
print("完成加载预训练模型")

# Load the pre-split dataset produced by the preprocessing step.
# SECURITY NOTE: pickle executes arbitrary code on load — only ever point
# this at a file produced by our own pipeline, never untrusted input.
with open("./datas/train.pkl", "rb") as f:
    # pickle.load reads directly from the stream; no need to slurp the
    # whole file into memory first with f.read() + pickle.loads.
    data_item = pickle.load(f)

# Expected keys: xtrain/xtest are token-id sequences, ytrain/ytest are the
# class indices, label is the ordered list of class names — TODO confirm
# against the preprocessing script.
X_train = data_item.get("xtrain")
X_test = data_item.get("xtest")
y_train = data_item.get("ytrain")
y_test = data_item.get("ytest")
labels = data_item.get("label")

print("完成加载数据")

# Ensure the output directory exists. makedirs(..., exist_ok=True) is
# atomic with respect to the existence check (no TOCTOU race) and also
# creates missing parents, unlike the exists()+mkdir() pair.
os.makedirs('./train', exist_ok=True)

# Persist the label vocabulary, one class name per line, so inference
# code can map predicted indices back to names.
with open("./train/train.txt", "w", encoding="utf-8") as f:
    f.writelines(f"{c4}\n" for c4 in labels)


# Mini-batch size shared by the train and test loaders.
batch_size = 16

train_dataset = DataGen(X_train, y_train)
test_dataset = DataGen(X_test, y_test)

# NOTE(review): the training loader is built without shuffle=True, so the
# sample order is fixed across epochs — confirm this is intentional.
train_dataloader, test_dataloader = (
    data.DataLoader(ds, batch_size=batch_size)
    for ds in (train_dataset, test_dataset)
)

# Checkpoints of the five independently trained experts (suffix is the
# test accuracy each reached).
expert_weights = [
    "./train_expert/train_model_0_0.91.pt",
    "./train_expert/train_model_1_0.91.pt",
    "./train_expert/train_model_2_0.90.pt",
    "./train_expert/train_model_3_0.89.pt",
    "./train_expert/train_model_4_0.90.pt",
]

print("开始加载专家模型")
# NOTE(review): every expert receives the SAME bert_model instance. If
# ModelExpert keeps a reference to it (rather than copying), the five
# load_state_dict calls below would each overwrite the shared encoder
# weights — verify ModelExpert deep-copies or that sharing is intended.
experts = []
for weight_path in expert_weights:
    expert = ModelExpert(bert_model, config, len(labels))
    # map_location="cpu" keeps CUDA-saved tensors loadable on CPU-only
    # hosts; the model is moved to the target device later.
    expert.load_state_dict(torch.load(weight_path, map_location="cpu"))
    experts.append(expert)
print("完成加载专家模型")

# Gating network over the five frozen-or-finetuned experts.
model_moe = ModelMoe(experts)

# Prefer the first CUDA device when available; fall back to CPU.
device = torch.device("cuda:0") if torch.cuda.is_available() else 'cpu'
# Plain print: the original used an f-string with no placeholders and
# passed the device as a second positional argument; the f prefix was a
# no-op, so it is dropped (output is unchanged).
print("using device:", device)
model_moe = model_moe.to(device)

# Standard multi-class setup: softmax cross-entropy + Adam with a small
# L2 penalty (weight_decay) on all MoE parameters.
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model_moe.parameters(), lr=0.0001, weight_decay=1e-4)

# Fine-tune the MoE gate (and experts, if not frozen inside ModelMoe) for
# a fixed 20 epochs; evaluate and checkpoint after every epoch.
for epoch in range(20):
    print(f"epoch = {epoch}, datetime = {datetime.datetime.now()}")
    loss_sum = 0.0   # running sum of per-batch mean losses
    accu = 0         # running count of correct train predictions

    model_moe.train()
    for token_ids, c1 in tqdm(train_dataloader):
        token_ids = token_ids.to(device).long()
        c1_label = c1.to(device).long()
        out = model_moe(token_ids)
        loss = criterion(out, c1_label)
        optimizer.zero_grad()
        loss.backward()   # backpropagation
        optimizer.step()  # parameter update
        # .item() replaces the deprecated .cpu().data.numpy() access and
        # returns a plain Python number.
        loss_sum += loss.item()
        accu += (out.argmax(1) == c1_label).sum().item()

    test_loss_sum = 0.0
    test_accu = 0

    model_moe.eval()
    for token_ids, c1 in tqdm(test_dataloader):
        token_ids = token_ids.to(device).long()
        c1_label = c1.to(device).long()
        with torch.no_grad():
            out = model_moe(token_ids)
            loss = criterion(out, c1_label)
            test_loss_sum += loss.item()
            test_accu += (out.argmax(1) == c1_label).sum().item()

    # NOTE(review): loss_sum is a sum of per-BATCH mean losses but is
    # divided by the number of SAMPLES, so the printed "loss" is scaled
    # down by ~batch_size; kept as-is to preserve comparability with
    # earlier runs — confirm whether a per-batch average was intended.
    print(
        f"epoch:{epoch}, train acc:{accu / len(train_dataset)} train loss:{loss_sum / len(train_dataset)}, test loss:{test_loss_sum / len(test_dataset)}, test acc:{test_accu / len(test_dataset)}")

    # BUG FIX: the original f-string interpolated {model_moe} — the full
    # multi-line repr of the nn.Module — into the checkpoint path, which
    # produces an invalid filename. Use a literal "moe" tag instead.
    torch.save(model_moe.state_dict(), f'./train/moe_model_{epoch}_{test_accu / len(test_dataset)}.pt')
