"""
训练和测试模块
"""
import time
import numpy as np
import torch
import os
from config import Config
from data_loader import DataLoader
from model_definitions import create_model
from utils import format_time, create_train_test_split

class Trainer:
    """Training/testing driver for the reconstruction model.

    Builds the data loader, model, criterion, optimizer and LR scheduler
    from the project Config, then exposes train / test / evaluate_sequence
    plus checkpoint helpers. Tracks the best (lowest-loss) checkpoint and
    per-epoch timing statistics.
    """

    def __init__(self, config=None):
        """Set up all training components.

        Args:
            config: optional Config instance; a default Config() is built
                when omitted.
        """
        self.config = config or Config()

        # Pin CUDA work to the configured GPU.
        torch.cuda.set_device(self.config.GPU_ID)
        print(f"使用GPU: {torch.cuda.get_device_name(self.config.GPU_ID)}")

        self.data_loader = DataLoader(self.config, sliding_step=getattr(self.config, 'SLIDING_STEP', 5))
        self.model, self.criterion, self.optimizer = create_model(self.config)

        # Move model and loss onto the configured device.
        self.model = self.model.to(self.config.DEVICE)
        self.criterion = self.criterion.to(self.config.DEVICE)

        # Shrink the LR when the epoch-average training loss plateaus.
        # NOTE(review): the `verbose` kwarg is deprecated/removed in recent
        # torch releases — confirm the installed torch version accepts it.
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer,
            mode='min',
            factor=self.config.LR_SCHEDULER['factor'],
            patience=self.config.LR_SCHEDULER['patience'],
            min_lr=self.config.LR_SCHEDULER['min_lr'],
            verbose=self.config.LR_SCHEDULER['verbose']
        )

        # Report the sliding-window configuration up front.
        self.data_loader.print_sliding_info()

        # Best-checkpoint bookkeeping, updated during train().
        self.best_loss = float('inf')
        self.best_epoch = 0
        self.best_model_path = os.path.join("dync", "model_best.pth")

        # Cumulative timing statistics, filled in by train().
        self.epoch_times = []
        self.total_training_time = 0
        self.total_overhead_time = 0

        # Make sure the checkpoint directory exists.
        os.makedirs("dync", exist_ok=True)

    def train(self):
        """Train the model and return the list of per-epoch average losses.

        When config.FORCE_RETRAIN is False this first tries to load a
        pretrained model; on success no epochs are run and an empty loss
        history is returned.
        """
        if not self.config.FORCE_RETRAIN:
            print("加载预训练模型...")
            try:
                # map_location guards against checkpoints saved on a
                # different device. NOTE(review): this loads a full pickled
                # model — prefer state_dict checkpoints for portability.
                self.model = torch.load(self.config.PRETRAINED_MODEL_PATH,
                                        map_location=self.config.DEVICE)
                self.model = self.model.to(self.config.DEVICE)
                self.model.eval()
                print("预训练模型加载成功")
                # Fix: the original returned None here while the training
                # path returns a list; keep the return type consistent.
                return []
            except Exception as e:
                print(f"加载预训练模型失败: {e}")
                print("开始重新训练...")

        print("开始训练...")
        start_time = time.time()
        loss_values = []

        for epoch in range(self.config.EPOCHS):
            epoch_start_time = time.time()

            # ---- training pass over all mini-batches ----
            self.model.train()
            total_loss = 0
            num_batches = 0  # counted here so the loader is built only once
            training_start_time = time.time()

            for batch_data in self.data_loader.get_train_loader():
                batch_data = batch_data.to(self.config.DEVICE)

                # Forward pass: reconstruction plus sparse code X.
                Y_reconstructed, X = self.model(batch_data)
                loss = self.criterion(Y_reconstructed, batch_data, X)

                # Backward pass and parameter update.
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                total_loss += loss.item()
                num_batches += 1

            training_time = time.time() - training_start_time

            # Average loss over the batches actually seen this epoch.
            avg_loss = total_loss / num_batches
            loss_values.append(avg_loss)

            # ---- LR scheduling, driven by the epoch-average loss ----
            scheduler_start_time = time.time()
            self.scheduler.step(avg_loss)
            scheduler_time = time.time() - scheduler_start_time

            # ---- checkpoint the best model so far ----
            save_time = 0
            if avg_loss < self.best_loss:
                save_start_time = time.time()
                self.best_loss = avg_loss
                self.best_epoch = epoch
                # Overwrites the previous best checkpoint in place.
                torch.save(self.model, self.best_model_path)
                save_time = time.time() - save_start_time
                print(f"\n发现更好的模型! Epoch {epoch + 1}, 损失: {avg_loss:.6f}")

            # Non-training bookkeeping time for this epoch.
            overhead_time = scheduler_time + save_time
            epoch_time = time.time() - epoch_start_time

            self.epoch_times.append(epoch_time)
            self.total_training_time += training_time
            self.total_overhead_time += overhead_time

            elapsed_time = time.time() - start_time

            # Progress report on the first epoch and every 10th thereafter.
            if (epoch + 1) % 10 == 0 or epoch == 0:
                current_lr = self.optimizer.param_groups[0]['lr']
                print(f"Epoch [{epoch + 1}/{self.config.EPOCHS}], "
                      f"平均损失: {avg_loss:.6f}, "
                      f"学习率: {current_lr:.6f}")
                print(f"时间统计:")
                print(f"  训练时间: {format_time(training_time)}")
                print(f"  开销时间: {format_time(overhead_time)}")
                print(f"  本轮总用时: {format_time(epoch_time)}")
                print(f"  累计训练时间: {format_time(self.total_training_time)}")
                print(f"  累计开销时间: {format_time(self.total_overhead_time)}")
                print(f"  总用时: {format_time(elapsed_time)}")

        # NOTE(review): MODEL_SAVE_PATH is only written when the final epoch
        # happened to be the best one; otherwise the best weights live only
        # at best_model_path — confirm this asymmetry is intended.
        if self.best_epoch == self.config.EPOCHS - 1:
            torch.save(self.model, self.config.MODEL_SAVE_PATH)
            print(f"\n最终模型已保存到: {self.config.MODEL_SAVE_PATH}")
        else:
            print(f"\n最佳模型已保存在: {self.best_model_path}")

        print(f"最佳模型在epoch {self.best_epoch + 1}，损失为: {self.best_loss:.6f}")
        print(f"\n时间统计总结:")
        print(f"总训练时间: {format_time(self.total_training_time)}")
        print(f"总开销时间: {format_time(self.total_overhead_time)}")
        print(f"平均每轮训练时间: {format_time(np.mean(self.epoch_times))}")
        print(f"平均每轮开销时间: {format_time(self.total_overhead_time / len(self.epoch_times))}")

        return loss_values

    def test(self):
        """Evaluate on the test set.

        Returns:
            (rmse_list, test_loss): per-batch lists of per-time-step RMSEs,
            and the summed criterion loss over all test batches.
        """
        print("\n开始测试...")
        self.model.eval()

        test_loss = 0
        rmse_list = []

        # Fix: build the loader once — the original called
        # get_test_loader() again on every iteration just to query len().
        test_loader = self.data_loader.get_test_loader()
        num_batches = len(test_loader)

        with torch.no_grad():
            for i, batch_data in enumerate(test_loader):
                batch_data = batch_data.to(self.config.DEVICE)

                print(f"测试批次 {i+1}/{num_batches} - batch_data shape: {batch_data.shape}")

                Y_reconstructed, X = self.model(batch_data)

                loss = self.criterion(Y_reconstructed, batch_data, X).item()
                test_loss += loss

                # Per-time-step RMSE over the batch (dim 2 indexes time).
                rmse_batch = []
                for t in range(batch_data.shape[2]):
                    mse_val = torch.mean((Y_reconstructed[:, :, t] - batch_data[:, :, t]) ** 2).item()
                    rmse_batch.append(np.sqrt(mse_val))

                rmse_list.append(rmse_batch)

        self._print_test_results(rmse_list, test_loss)

        return rmse_list, test_loss

    def evaluate_sequence(self, start_idx=1500):
        """Reconstruct one sequence starting at start_idx and report errors.

        Returns:
            (Y_reconstructed, X, mse_total, rmse)
        """
        print(f"\n评估序列数据 (起始索引: {start_idx})...")

        # Assumes load_sequence_data returns a tensor already on the model's
        # device — TODO confirm against data_loader.
        Y_tensor = self.data_loader.load_sequence_data(start_idx)

        self.model.eval()
        with torch.no_grad():
            Y_reconstructed, X = self.model(Y_tensor)

        # Total reconstruction loss for the whole sequence.
        mse_total = self.criterion(Y_reconstructed, Y_tensor, X).item()

        # RMSE over the (squeezed) sequence.
        Y_reconstructed_squeezed = Y_reconstructed.squeeze(0)
        Y_tensor_squeezed = Y_tensor.squeeze(0)
        rmse = torch.sqrt(torch.mean((Y_reconstructed_squeezed - Y_tensor_squeezed) ** 2)).item()

        print(f"序列重构MSE: {mse_total:.6f}")
        print(f"序列RMSE: {rmse:.6f}")

        self._analyze_sparse_representation(X.squeeze(0))

        return Y_reconstructed, X, mse_total, rmse

    def _print_test_results(self, rmse_list, test_loss):
        """Pretty-print per-batch/per-step RMSEs plus aggregate metrics."""
        print("\n测试集RMSE结果：")
        for i, rmse in enumerate(rmse_list):
            print(f"测试批次 {i+1} 的RMSE:")
            for j, value in enumerate(rmse):
                print(f"  时间步 {j+1}: {value:.6f}")

        # Mean RMSE over every (batch, time-step) pair.
        avg_rmse = np.mean([val for batch in rmse_list for val in batch])
        print(f"\n测试集平均RMSE: {avg_rmse:.6f}")
        print(f"测试集平均MSE: {test_loss / len(self.data_loader.get_test_loader()):.6f}")

    def _analyze_sparse_representation(self, X):
        """Print column sums and the non-zero support of a sparse code X.

        Args:
            X: 2-D tensor; each column is one time step's sparse code.
        """
        print("\n稀疏表示分析:")

        # Per-column sums give a quick view of each step's total activation.
        column_sums = torch.sum(X, dim=0)
        print("每列的和:", [f"{float(val):.6f}" for val in column_sums])

        nonzero_indices_list = []
        nonzero_values_list = []

        for col in range(X.size(1)):
            col_tensor = X[:, col]
            non_zero_mask = col_tensor != 0
            nonzero_indices = torch.nonzero(non_zero_mask, as_tuple=False).squeeze(1).tolist()
            nonzero_indices_list.append(nonzero_indices)

            # Rounded for display only; X itself is untouched.
            values_tensor = torch.round(col_tensor[non_zero_mask], decimals=3)
            formatted_values = [round(float(v), 3) for v in values_tensor]
            nonzero_values_list.append(formatted_values)

        for col_idx, (indices, values) in enumerate(zip(nonzero_indices_list, nonzero_values_list)):
            print(f"\n时间步 {col_idx + 1}:")
            print(f"非零位置序号: {indices}")
            print(f"对应的值: {values}")

    def get_model(self):
        """Return the underlying model instance."""
        return self.model

    def save_model(self, path=None):
        """Save the full model (pickle) to `path` or config.MODEL_SAVE_PATH."""
        save_path = path or self.config.MODEL_SAVE_PATH
        torch.save(self.model, save_path)
        print(f"模型已保存到: {save_path}")

    def load_model(self, path=None):
        """Load a full model from `path` or config.PRETRAINED_MODEL_PATH.

        Failures are reported but not raised (best-effort load).
        """
        load_path = path or self.config.PRETRAINED_MODEL_PATH
        try:
            # Fix: map_location keeps CPU/GPU checkpoints interchangeable.
            self.model = torch.load(load_path, map_location=self.config.DEVICE)
            self.model.eval()
            print(f"模型已从 {load_path} 加载")
        except Exception as e:
            print(f"加载模型失败: {e}")

def main():
    """Script entry point: build everything, then train, test and evaluate."""
    config = Config()

    # Seed all RNGs before any stochastic component is constructed.
    from utils import set_seed
    set_seed(config.SEED)

    # The trainer wires up data, model, optimizer and scheduler internally.
    trainer = Trainer(config)

    # Full pipeline: fit the model, score the test set, then inspect one
    # sequence reconstruction in detail.
    trainer.train()
    trainer.test()
    trainer.evaluate_sequence()

    print("\n训练和测试完成！")

if __name__ == "__main__":
    main() 