import os

import joblib
import transformers

from model.BertClassification import *
from utils import *

# ---- Run configuration -----------------------------------------------------
loss_function_name = 'ce'  # training criterion: 'ce' (cross-entropy) or 'f1'
k_num = 10  # number of folds for k-fold cross-validation
epoch_num = 25  # training epochs per fold
lr = 1e-5  # learning rate
vis_step = 10  # print the running loss every `vis_step` batches
batch_size = 64
out_path = osp.join('out', loss_function_name)
data_path = osp.join('data', 'dataset.xlsx')
print(f'{k_num}折交叉验证，损失函数为{loss_function_name}')
transformers.logging.set_verbosity_error()  # silence HuggingFace warnings
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = BertClassification(device, class_num=2)  # instantiate the classifier

# Select the loss function; fail fast on an unknown name.
if loss_function_name == 'ce':
    loss_function = nn.CrossEntropyLoss().to(device=device)  # cross-entropy for classification
elif loss_function_name == 'f1':
    loss_function = F1_Loss().to(device=device)
else:
    raise ValueError(f'{loss_function_name} 无法确定！')

# Build the output directory tree. Creating each leaf with exist_ok=True fixes
# a latent bug: previously the sub-directories were only created when the top
# directory was missing, so a partially-existing `out_path` (e.g. from an
# interrupted run) made the saves below crash with FileNotFoundError.
os.makedirs(osp.join(out_path, 'out_models'), exist_ok=True)
os.makedirs(osp.join(out_path, 'bad_preds'), exist_ok=True)
os.makedirs(osp.join(out_path, 'loss_imgs'), exist_ok=True)
# Snapshot the untrained weights so every fold can restart from the same state.
torch.save(model.state_dict(), osp.join(out_path, 'out_models', 'primary_model_parameter.pkl'))
optimizer = torch.optim.Adam(model.parameters(), lr=lr)  # Adam optimizer
best_F1 = 0  # best macro-F1 seen across ALL folds (controls model checkpointing)
# (removed unused `all_mean_loss` — it was never written to or read anywhere)
dataset = BuildKDatasets(k_num=k_num, data_path=data_path)
kfold_F1_scores, bad_preds_dfs = [], {}  # per-fold best F1 / mispredicted samples
for k, data in enumerate(dataset.get_kFold_dataset()):
    loss_data, macro_f1_data, neg_f1_data, pos_f1_data = [], [], [], []
    # Best macro-F1 within THIS fold. Fixes a bug where the global `best_F1`
    # was never reset between folds, so `kfold_F1_scores` recorded the running
    # global maximum instead of each fold's own best, inflating the mean —
    # and later folds could fail to record their bad predictions at all.
    fold_best_F1 = 0
    for epoch in range(epoch_num):  # train `epoch_num` epochs on this fold
        '''训练模块*********************************************************************'''
        model.train()
        data_loader = BertDataLoader(sentences_pairs=data['X_train'], sentences_labels=data['y_train'], batch_size=batch_size)
        print(f'K_num: {k + 1}/{k_num}, lr-->{lr}')
        epoch_loss, batches_loss = 0, 0  # per-epoch and per-`vis_step`-window loss accumulators
        for i, (batch_pairs, batch_labels) in enumerate(data_loader):
            inputs = batch_pairs
            targets = torch.tensor(batch_labels).to(device)
            optimizer.zero_grad()  # 1. clear accumulated gradients
            outputs = model(inputs)  # 2. forward pass
            loss = loss_function(outputs, targets)  # 3. compute loss
            loss.backward()  # 4. backpropagate
            optimizer.step()  # 5. update weights
            batches_loss += loss.item()  # .item() detaches the scalar loss value
            epoch_loss += loss.item()
            # Report the mean loss of the last `vis_step` batches. Using
            # (i + 1) fixes the old off-by-one where batch 0 printed a single
            # batch's loss divided by vis_step.
            if (i + 1) % vis_step == 0:
                print(f'Epoch: {(epoch + 1)}, lr: {lr}, batch: {i}, {loss_function_name}_loss: {round(batches_loss / vis_step, 3)}')
                batches_loss = 0
        loss_data.append(epoch_loss / len(data_loader))  # mean loss of this epoch
        print(f"Epoch: {epoch + 1}, mean_loss: {round(epoch_loss / len(data_loader), 3)}")
        '''评估模块*********************************************************************'''
        model.eval()
        macro_F1, neg_F1, pos_F1, bad_preds_df = evaluate(model=model,
                                                          sentences_pairs=data['X_test'],
                                                          sentences_labels=data['y_test'])
        macro_f1_data.append(macro_F1)
        neg_f1_data.append(neg_F1)
        pos_f1_data.append(pos_F1)
        print(f"Epoch: {epoch + 1}, F1: {round(macro_F1, 3)}")
        ShowLoss(title=f'bert_cls_{loss_function_name}_{k_num}cv_K{k + 1}_loss',
                 macro_F1_data=macro_f1_data,
                 neg_F1_data=neg_f1_data,
                 pos_F1_data=pos_f1_data,
                 loss_data=loss_data,
                 out_path=osp.join(out_path, 'loss_imgs'))
        if fold_best_F1 < macro_F1:
            fold_best_F1 = macro_F1
            bad_preds_dfs[k] = bad_preds_df  # keep the mispredictions of this fold's best epoch
        # The single saved checkpoint still tracks the best model across ALL folds.
        if best_F1 < macro_F1:
            best_F1 = macro_F1
            joblib.dump(model, osp.join(out_path, 'out_models', f'bert_cls_{loss_function_name}_{k_num}cv.pkl'))
            print(f'Best model is saved at epoch {epoch + 1} with F1 {best_F1}.')
    write_to_excel(bad_preds_dfs, osp.join(out_path, 'bad_preds', f'bad_preds_{k}.xls'))
    # Reset the weights to the untrained snapshot so the next fold starts fresh.
    model.load_state_dict(torch.load(osp.join(out_path, 'out_models', 'primary_model_parameter.pkl')))
    kfold_F1_scores.append(fold_best_F1)  # this fold's best F1 (was: global running max — bug)
    joblib.dump(kfold_F1_scores, osp.join(out_path, 'loss_imgs', 'kfold_F1_scores.pkl'))
    print(f'第{k + 1}折的macro F1均分：{get_kFold_model_eval(kfold_F1_scores)}')
write_to_excel(bad_preds_dfs, osp.join(out_path, 'bad_preds', 'bad_preds.xls'))
