import os
import time

import numpy as np
from matplotlib import pyplot as plt

from core.config import no_grad
from core.cuda import cuda_module, cuda
from core.data.dataloader import DataLoader
from utils.common import accuracy, clip_grads


class Trainer:
    def __init__(self, model, optimizer):
        """
        Trainer drives the training loop of a neural-network model.

        Args:
            model: The model to train.
            optimizer: The optimizer used to update the model's parameters.
        """
        self.test_loader = None
        self.train_loader = None
        self.test_set = None
        self.train_set = None
        self.model = model
        self.optimizer = optimizer
        self.loss_list = []  # average training loss recorded per epoch
        self.current_epoch = 0  # number of completed training epochs

    def load_weights(self, file_path):
        """
        Load model weights from a file, if it exists.

        Args:
            file_path: Path of the weights file.
        """
        if os.path.exists(file_path):
            self.model.load_weights(file_path)
        else:
            print(f"{file_path} doesn't exist")

    def save_weights(self, file_path):
        """
        Save model weights to a file.

        Args:
            file_path: Path of the weights file.
        """
        self.model.save_weights(file_path)

    def prepare_data(self, train_set, test_set, batch_size=100, shuffle=False):
        """
        Wrap the datasets in DataLoaders and move data and model to the GPU
        when one is available.

        Args:
            train_set: Training dataset.
            test_set: Test dataset.
            batch_size: Mini-batch size for both loaders. Defaults to 100.
            shuffle: Passed to the *test* loader only; the train loader keeps
                the DataLoader default. Defaults to False.
        """
        self.train_set = train_set
        self.test_set = test_set
        self.train_loader = DataLoader(train_set, batch_size)
        self.test_loader = DataLoader(test_set, batch_size, shuffle=shuffle)
        if cuda.gpu_enable:
            self.train_loader.to_gpu()
            self.test_loader.to_gpu()
            self.model.to_gpu()

    def fit(self, loss_function, max_epoch=20):
        """
        Train for ``max_epoch`` epochs, evaluating on the test set after
        every epoch. Per-epoch average training loss is appended to
        ``self.loss_list`` and ``self.current_epoch`` is kept in sync.

        Args:
            loss_function: Callable computing the loss from (prediction, target).
            max_epoch: Number of epochs to train. Defaults to 20.
        """
        model, optimizer = self.model, self.optimizer

        for epoch in range(max_epoch):
            sum_loss, sum_acc = 0, 0

            # --- training phase ---
            for x, t in self.train_loader:
                y = model(x)
                loss = loss_function(y, t)
                acc = accuracy(y, t)
                model.cleargrads()
                loss.backward()
                optimizer.update()
                # weight the batch statistics by the batch size
                sum_loss += float(loss.data) * len(t)
                sum_acc += float(acc.data) * len(t)

            print('epoch: {}'.format(epoch + 1))
            avg_loss = sum_loss / len(self.train_set)
            print('train loss: {}, accuracy: {}'.format(
                avg_loss, sum_acc / len(self.train_set)))

            self.loss_list.append(float(avg_loss))
            # Fix: keep the epoch counter in sync (it was initialized in
            # __init__ but never updated).
            self.current_epoch = epoch + 1

            # --- evaluation phase (gradients disabled) ---
            sum_loss, sum_acc = 0, 0
            with no_grad():
                for x, t in self.test_loader:
                    y = model(x)
                    loss = loss_function(y, t)
                    acc = accuracy(y, t)
                    sum_loss += float(loss.data) * len(t)
                    sum_acc += float(acc.data) * len(t)

            print('test loss: {}, accuracy: {}'.format(
                sum_loss / len(self.test_set), sum_acc / len(self.test_set)))

    def plot(self, ylim=None):
        """
        Plot the recorded per-epoch training-loss curve.

        Args:
            ylim (tuple, optional): Limits for the y axis. Defaults to None.
        """
        x = cuda_module.arange(len(self.loss_list))
        if ylim is not None:
            plt.ylim(*ylim)
        plt.plot(x, cuda.to_array(self.loss_list), label='train')
        plt.xlabel('epoch')
        plt.ylabel('loss')
        plt.show()

def remove_duplicate(params, grads):
    """
    Collapse duplicated weights in ``params`` into a single entry,
    accumulating the corresponding gradients.

    Two kinds of duplication are merged:
    * the very same array object appearing twice (shared weights), and
    * a 2-D weight equal to the transpose of another (weight tying),
      in which case the transposed gradient is accumulated.

    Args:
        params: List of parameter arrays (possibly containing duplicates).
        grads: List of gradient arrays, parallel to ``params``.

    Returns:
        A ``(params, grads)`` pair of new lists with duplicates removed.
    """
    params, grads = list(params), list(grads)  # work on copies

    merged = True
    while merged:
        merged = False
        count = len(params)

        for a in range(count - 1):
            for b in range(a + 1, count):
                if params[a] is params[b]:
                    # Identical object: shared weight.
                    grads[a] += grads[b]
                    del params[b]
                    del grads[b]
                    merged = True
                elif (params[a].ndim == 2 and params[b].ndim == 2
                      and params[a].T.shape == params[b].shape
                      and np.all(params[a].T == params[b])):
                    # Transposed duplicate: weight tying.
                    grads[a] += grads[b].T
                    del params[b]
                    del grads[b]
                    merged = True

                if merged:
                    break  # restart the scan after any mutation
            if merged:
                break

    return params, grads
