# -*- coding: utf-8 -*-
import logging
import os

import torch
import numpy as np

from model import XModel
from loader import XFileLoader

from evaluate import XEvaluate

from peft_tuning import XPeftTuning

# Configure root logging once at import time; every module in the run shares
# this timestamped format. Module-level logger per stdlib convention.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class ModelRun():
    """Drives a full training run.

    Loads train/validation data, builds the model, wraps it for
    parameter-efficient fine-tuning (PEFT), trains for ``epoch_num`` epochs,
    evaluates after every epoch, and checkpoints the trainable parameters.
    """

    def __init__(self, config):
        # config is dict-like; keys read here: epoch_num, model_path_save,
        # train_data_path, valid_data_path (optional), only_save_last,
        # optimizer, learning_rate. The X* collaborators read their own keys.
        self.config = config

    def forward(self):
        """Run the training loop.

        Each epoch: train over all batches, log the mean loss, evaluate on
        the validation set, then optionally save a checkpoint (every epoch,
        or only the last one when ``only_save_last == 1``).
        """
        epoch_num = self.config.get("epoch_num")  # number of training epochs
        # Checkpoint directory (relative to cwd); empty/None disables saving.
        model_path_save = self.config.get("model_path_save")

        data_loader = XFileLoader(self.config)
        # Load training data.
        train_data = data_loader.load_file_data(self.config["train_data_path"])
        # Validation path is now configurable; defaults to the original
        # hard-coded location for backward compatibility.
        valid_data = data_loader.load_file_data(
            self.config.get("valid_data_path", "D:/dataset/新闻/valid_tag_news.csv"))

        # Evaluator used to measure model quality after every epoch.
        x_evaluate = XEvaluate(valid_data, self.config)

        # Build the base model, then wrap it for PEFT fine-tuning.
        model = XModel(self.config)
        model = XPeftTuning(self.config).getModel(model)

        optim = self.choose_optimizer(model)
        for epoch in range(1, epoch_num + 1):
            model.train()
            watch_loss = []
            logger.info("epoch %d begin" % epoch)

            for batch_data in train_data:
                optim.zero_grad()  # reset gradients before each step
                # Assumes each batch is [(inputs, labels)] — TODO confirm
                # against XFileLoader's batch format.
                x, y = batch_data[0]
                loss = model(x, y.view(-1))  # forward pass returns the loss
                loss.backward()              # backprop
                optim.step()                 # update weights
                watch_loss.append(loss.item())
            logger.info("第%d轮平均loss:%f" % (epoch, np.mean(watch_loss)))
            logger.info("测试本轮模型效果")
            x_evaluate.eval(model)

            # Checkpointing: save every epoch, or only the final epoch when
            # only_save_last == 1.
            if model_path_save is not None and model_path_save != "":
                save_dir = os.path.join(os.getcwd(), model_path_save)
                # Create the target directory up front so torch.save can't
                # fail on a missing path.
                os.makedirs(save_dir, exist_ok=True)
                if self.config["only_save_last"] != 1 or epoch == epoch_num:
                    save_path = os.path.join(save_dir, "epoch_%d.pth" % epoch)
                    self.save_model(model, save_path)

    def save_model(self, model, path):
        """Save only the trainable (requires_grad) parameters, moved to CPU.

        Keeps PEFT checkpoints small: frozen base-model weights are skipped.

        Args:
            model: any ``torch.nn.Module``.
            path: destination file for ``torch.save``.
        """
        saved_params = {
            k: v.to("cpu")
            for k, v in model.named_parameters()
            if v.requires_grad
        }
        torch.save(saved_params, path)

    def choose_optimizer(self, model):
        """Build the optimizer named by ``config["optimizer"]``.

        Returns:
            ``torch.optim.Adam`` or ``torch.optim.SGD`` over the model's
            parameters with ``config["learning_rate"]``.

        Raises:
            ValueError: for an unsupported optimizer name (the original
                silently returned None, which crashed later at zero_grad()).
        """
        optimizer = self.config["optimizer"]
        learning_rate = self.config["learning_rate"]
        if optimizer == "adam":
            return torch.optim.Adam(model.parameters(), lr=learning_rate)
        if optimizer == "sgd":
            return torch.optim.SGD(model.parameters(), lr=learning_rate)
        raise ValueError("unsupported optimizer: %r" % optimizer)


if __name__ == "__main__":
    # Entry point: run a training session with the project-wide config.
    from config import Config

    runner = ModelRun(Config)
    runner.forward()
