# 指定文件编码为UTF-8
# coding: utf-8

"""
trainer.py 功能说明：
1. 实现神经网络训练的通用训练器类
2. 支持多种优化算法(SGD, Momentum, Adam等)
3. 提供训练过程监控和评估功能
4. 支持批量训练和周期性评估

主要功能：
- 自动执行训练迭代
- 记录训练损失和准确率
- 定期评估测试集性能
- 支持多种优化器配置
- 提供训练进度输出

训练流程：
1. 初始化训练器(配置网络、数据、超参数等)
2. 调用train()方法开始训练
3. 自动执行指定epoch数量的训练
4. 每epoch评估训练集和测试集准确率
5. 训练完成后输出最终测试准确率
"""

import sys, os
sys.path.append(os.pardir)  # 添加父目录到系统路径
import numpy as np
from common.optimizer import *  # 导入优化器

class Trainer:
    """Generic mini-batch trainer for neural networks.

    Runs mini-batch training for a fixed number of epochs, records the
    loss of every iteration, and evaluates train/test accuracy once per
    epoch. The network must expose ``params``, ``gradient(x, t)``,
    ``loss(x, t)`` and ``accuracy(x, t)``.
    """

    def __init__(self, network, x_train, t_train, x_test, t_test,
                 epochs=20, mini_batch_size=100,
                 optimizer='SGD', optimizer_param=None,
                 evaluate_sample_num_per_epoch=None, verbose=True):
        """Initialize the trainer.

        Args:
            network: network instance (provides params/gradient/loss/accuracy)
            x_train: training data
            t_train: training labels
            x_test: test data
            t_test: test labels
            epochs: number of training epochs
            mini_batch_size: mini-batch size
            optimizer: optimizer name, case-insensitive
                       ('SGD', 'Momentum', 'Nesterov', 'AdaGrad', 'RMSprop', 'Adam')
            optimizer_param: keyword arguments for the optimizer constructor;
                             defaults to {'lr': 0.01} when omitted
            evaluate_sample_num_per_epoch: if set, evaluate accuracy on only
                the first N samples each epoch (speeds up large datasets)
            verbose: whether to print progress information
        """
        self.network = network
        self.verbose = verbose
        self.x_train = x_train
        self.t_train = t_train
        self.x_test = x_test
        self.t_test = t_test
        self.epochs = epochs
        self.batch_size = mini_batch_size
        self.evaluate_sample_num_per_epoch = evaluate_sample_num_per_epoch

        # Avoid a mutable default argument; keep the historical default lr.
        if optimizer_param is None:
            optimizer_param = {'lr': 0.01}

        # Optimizer configuration: map the lowercased name to its class.
        optimizer_class_dict = {
            'sgd': SGD,
            'momentum': Momentum,
            'nesterov': Nesterov,
            'adagrad': AdaGrad,
            'rmsprop': RMSprop,
            'rmsprpo': RMSprop,  # legacy misspelled key, kept for backward compatibility
            'adam': Adam,
        }
        self.optimizer = optimizer_class_dict[optimizer.lower()](**optimizer_param)

        # Derived training-loop parameters.
        self.train_size = x_train.shape[0]
        # Floor division: iterations per epoch must be an integer so the
        # modulo check in train_step() fires exactly once per epoch
        # (float division broke the check when train_size % batch_size != 0).
        self.iter_per_epoch = max(self.train_size // mini_batch_size, 1)
        self.max_iter = int(epochs * self.iter_per_epoch)
        self.current_iter = 0
        self.current_epoch = 0

        # Histories: loss per iteration, accuracies per epoch.
        self.train_loss_list = []
        self.train_acc_list = []
        self.test_acc_list = []

    def train_step(self):
        """Run a single training iteration (one mini-batch update)."""
        # Sample a random mini-batch (with replacement).
        batch_mask = np.random.choice(self.train_size, self.batch_size)
        x_batch = self.x_train[batch_mask]
        t_batch = self.t_train[batch_mask]

        # Compute gradients and update the network parameters in place.
        grads = self.network.gradient(x_batch, t_batch)
        self.optimizer.update(self.network.params, grads)

        # Record the post-update loss on the same mini-batch.
        loss = self.network.loss(x_batch, t_batch)
        self.train_loss_list.append(loss)
        if self.verbose:
            print("train loss:" + str(loss))

        # Evaluate at each epoch boundary.
        if self.current_iter % self.iter_per_epoch == 0:
            self.current_epoch += 1

            # Optionally restrict evaluation to the first N samples.
            x_train_sample, t_train_sample = self.x_train, self.t_train
            x_test_sample, t_test_sample = self.x_test, self.t_test
            if self.evaluate_sample_num_per_epoch is not None:
                t = self.evaluate_sample_num_per_epoch
                x_train_sample, t_train_sample = self.x_train[:t], self.t_train[:t]
                x_test_sample, t_test_sample = self.x_test[:t], self.t_test[:t]

            # Compute and record accuracies.
            train_acc = self.network.accuracy(x_train_sample, t_train_sample)
            test_acc = self.network.accuracy(x_test_sample, t_test_sample)
            self.train_acc_list.append(train_acc)
            self.test_acc_list.append(test_acc)

            if self.verbose:
                print("=== epoch:" + str(self.current_epoch) +
                     ", train acc:" + str(train_acc) +
                     ", test acc:" + str(test_acc) + " ===")
        self.current_iter += 1

    def train(self):
        """Run the full training loop, then report final test accuracy."""
        for i in range(self.max_iter):
            self.train_step()

        # Final evaluation on the complete test set.
        test_acc = self.network.accuracy(self.x_test, self.t_test)

        if self.verbose:
            print("=============== Final Test Accuracy ===============")
            print("test acc:" + str(test_acc))

"""
使用示例：
1. 初始化训练器:
   trainer = Trainer(network, x_train, t_train, x_test, t_test,
                    epochs=20, mini_batch_size=100,
                    optimizer='Adam', optimizer_param={'lr':0.001},
                    evaluate_sample_num_per_epoch=1000)
2. 开始训练:
   trainer.train()

训练过程输出:
- 每次迭代的训练损失
- 每个epoch的训练和测试准确率
- 最终测试准确率

注意事项:
1. 确保网络实现了gradient()和accuracy()方法
2. 可根据需要调整批量大小和学习率
3. 大数据集可设置evaluate_sample_num_per_epoch加速评估
"""
