import os
import time
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from colorama import Fore, Style, init
from tabulate import tabulate
from datetime import datetime
import psutil
import torch
import gc
import shutil
import glob
from torch.utils.tensorboard import SummaryWriter

# Initialize colorama; autoreset=True restores default terminal colors
# after every print, so styled strings below don't need trailing resets.
init(autoreset=True)

class DogBreedTrainer:
    """Training harness for a dog-breed image classifier.

    Responsibilities:
      * per-run bookkeeping: a numbered run directory under ``save_dir``,
        a persistent run counter in ``training_history.txt``, TensorBoard
        logging, and checkpoints mirrored into ``model_dir``;
      * console reporting (colorama/tabulate tables, tqdm progress bars);
      * the train/validate loops, metric history, and plotting.
    """

    def __init__(self, model_name, epochs, save_dir="./runs", model_dir="./model"):
        """Set up run directories, logging, and metric bookkeeping.

        Args:
            model_name: Human-readable model identifier used in the run name.
            epochs: Total number of epochs ``train`` will run.
            save_dir: Root directory for per-run logs. Wiped on every init.
            model_dir: Directory that always receives the latest best model.
        """
        self.model_name = model_name
        self.epochs = epochs
        self.start_time = time.time()
        # Per-epoch metric history consumed by update_metrics()/plot_metrics().
        self.metrics_history = {
            'train_loss': [],
            'val_loss': [],
            'train_acc': [],
            'val_acc': [],
            'learning_rate': [],
            'epoch_time': []
        }
        self.best_val_acc = 0
        self.best_epoch = 0
        self.early_stop_counter = 0
        self.model_dir = model_dir

        # Make sure the model folder exists.
        os.makedirs(model_dir, exist_ok=True)

        # Clear the runs folder. NOTE(review): this deletes ALL previous run
        # logs even though the run counter below keeps increasing — confirm
        # that discarding run history on every init is intentional.
        if os.path.exists(save_dir):
            shutil.rmtree(save_dir)
        os.makedirs(save_dir, exist_ok=True)

        # Sequential run number, persisted across program restarts.
        train_count = self._get_next_train_count()
        self.timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.run_name = f"第{train_count}次训练_{model_name}_{self.timestamp}"
        self.save_dir = os.path.join(save_dir, self.run_name)
        os.makedirs(self.save_dir, exist_ok=True)

        # TensorBoard writer lives for the whole run; closed in cleanup().
        self.writer = SummaryWriter(log_dir=self.save_dir)
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # Startup banner and host resource report.
        self._print_header()
        self._log_system_info()

    def _get_next_train_count(self):
        """Return the 1-based sequence number for this run and persist it.

        The counter lives in ``training_history.txt`` in the working
        directory and survives restarts (unlike the runs folder, which
        ``__init__`` wipes). A missing or corrupt file restarts at 1.
        """
        history_file = "training_history.txt"
        if os.path.exists(history_file):
            with open(history_file, 'r') as f:
                try:
                    count = int(f.read().strip()) + 1
                except ValueError:
                    # Corrupt or empty counter file: restart numbering.
                    count = 1
        else:
            count = 1

        # Persist the updated counter for the next run.
        with open(history_file, 'w') as f:
            f.write(str(count))

        return count

    def _log_system_info(self):
        """Print a table of host CPU/memory (and GPU, if available) usage."""
        vmem = psutil.virtual_memory()  # snapshot once instead of 3 calls
        system_info = {
            "CPU使用率": f"{psutil.cpu_percent()}%",
            "内存使用率": f"{vmem.percent}%",
            "可用内存": f"{vmem.available / (1024**3):.2f} GB",
            "总内存": f"{vmem.total / (1024**3):.2f} GB"
        }

        if torch.cuda.is_available():
            total_mem = torch.cuda.get_device_properties(0).total_memory
            allocated = torch.cuda.memory_allocated(0)
            system_info["GPU设备"] = torch.cuda.get_device_name(0)
            system_info["GPU显存总量"] = f"{total_mem / (1024**3):.2f} GB"
            system_info["GPU显存使用"] = f"{allocated / (1024**3):.2f} GB"
            system_info["GPU显存占用率"] = f"{allocated / total_mem * 100:.2f}%"

        print(f"\n{Fore.CYAN}{Style.BRIGHT}🖥️ 系统资源信息 🖥️{Style.RESET_ALL}\n")
        system_table = [[k, v] for k, v in system_info.items()]
        print(tabulate(system_table, headers=["指标", "数值"], tablefmt="fancy_grid"))

    def _print_header(self):
        """Print the run banner (model name and save path)."""
        print("\n" + "="*80)
        print(f"{Fore.CYAN}{Style.BRIGHT}✨ 狗品种识别训练任务 ✨{Style.RESET_ALL}".center(90))
        print(f"{Fore.YELLOW}模型: {Style.BRIGHT}{self.model_name}{Style.RESET_ALL}".center(80))
        print(f"{Fore.YELLOW}保存路径: {Style.BRIGHT}{self.save_dir}{Style.RESET_ALL}".center(90))
        print("="*80 + "\n")

    def data_summary(self, train_dataset, val_dataset):
        """Print dataset sizes and the class-name → index mapping.

        Args:
            train_dataset / val_dataset: expected to wrap the real dataset in
                a ``.dataset`` attribute (e.g. a DataLoader or Subset) —
                both are unwrapped one level before inspection.
        """
        print(f"\n{Fore.BLUE}{Style.BRIGHT}📊 数据集摘要 📊{Style.RESET_ALL}\n")

        # Unwrap one level (e.g. DataLoader.dataset or Subset.dataset).
        train_dataset = train_dataset.dataset
        val_dataset = val_dataset.dataset

        # After random_split the Subset wraps the original dataset, which is
        # where `classes` / `class_to_idx` actually live.
        if hasattr(train_dataset, 'dataset'):
            original_dataset = train_dataset.dataset
            classes = original_dataset.classes
            class_to_idx = original_dataset.class_to_idx
        else:
            classes = train_dataset.classes
            class_to_idx = train_dataset.class_to_idx

        # NOTE(review): image size and augmentations below are hard-coded
        # labels, not read from the transforms — keep them in sync manually.
        data_info = {
            "训练集大小": len(train_dataset),
            "验证集大小": len(val_dataset),
            "类别数量": len(classes),
            "图像大小": "224x224",
            "数据增强": "随机水平翻转、随机旋转"
        }

        table_data = [[k, str(v)] for k, v in data_info.items()]
        print(tabulate(table_data, headers=["指标", "数值"], tablefmt="fancy_grid"))

        # Class index table.
        print("\n📑 类别信息:\n")
        class_info = [[class_name, idx] for class_name, idx in class_to_idx.items()]
        print(tabulate(class_info, headers=["类别名称", "索引"], tablefmt="grid"))

    def model_summary(self, model):
        """Print parameter counts and static training-setup labels.

        Args:
            model: any ``torch.nn.Module``; only its parameters are inspected.
        """
        print(f"\n{Fore.MAGENTA}{Style.BRIGHT}🏗️ 模型架构摘要 🏗️{Style.RESET_ALL}\n")

        total_params = sum(p.numel() for p in model.parameters())
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)

        # NOTE(review): the model type / optimizer / loss rows are fixed text
        # and do not reflect the actual objects passed elsewhere — confirm
        # they match the training configuration.
        model_info = [
            ["模型类型", "VGG16 (迁移学习)"],
            ["总参数量", f"{total_params:,}"],
            ["可训练参数", f"{trainable_params:,}"],
            ["非训练参数", f"{total_params - trainable_params:,}"],
            ["预训练模型", "ImageNet"],
            ["优化器", "Adam"],
            ["损失函数", "CrossEntropyLoss"]
        ]

        print(tabulate(model_info, headers=["属性", "值"], tablefmt="fancy_grid"))

    def train(self, model, train_loader, val_loader, criterion, optimizer, scheduler=None):
        """Run the full training loop.

        For each epoch: train, validate, log scalars to TensorBoard, print a
        summary table, and save a checkpoint whenever validation accuracy
        improves (to both the run directory and ``self.model_dir``).

        Args:
            model: network to train; inputs are moved to ``self.device``.
            train_loader / val_loader: iterables of (inputs, targets) batches.
            criterion: loss function.
            optimizer: optimizer; the first param group's lr is reported.
            scheduler: optional LR scheduler, stepped once per *batch*.
        """
        print("\n🚀 开始训练 🚀\n")

        best_val_acc = 0
        train_steps = len(train_loader)

        for epoch in range(1, self.epochs + 1):
            # ---------- training phase ----------
            model.train()
            train_loss = 0
            train_correct = 0
            train_total = 0

            train_pbar = tqdm(train_loader, desc=f'Epoch {epoch}/{self.epochs} [Train]')
            for batch_idx, (inputs, targets) in enumerate(train_pbar):
                inputs, targets = inputs.to(self.device), targets.to(self.device)

                optimizer.zero_grad()
                outputs = model(inputs)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()

                if scheduler is not None:
                    scheduler.step()  # per-batch LR schedule

                train_loss += loss.item()
                _, predicted = outputs.max(1)
                train_total += targets.size(0)
                train_correct += predicted.eq(targets).sum().item()

                # Running averages for the progress bar.
                train_acc = 100. * train_correct / train_total
                current_lr = optimizer.param_groups[0]['lr']
                train_pbar.set_postfix({
                    'loss': f'{train_loss/(batch_idx+1):.3f}',
                    'acc': f'{train_acc:.2f}%',
                    'lr': f'{current_lr:.6f}'
                })
                # Per-batch `del` + torch.cuda.empty_cache() removed: the
                # caching allocator reuses memory on its own, and emptying
                # the cache every batch forces a device sync and slows
                # training; cleanup now happens once per epoch below.

            # Epoch-level training metrics.
            train_loss = train_loss / train_steps
            train_acc = 100. * train_correct / train_total

            # ---------- validation phase ----------
            model.eval()
            val_loss = 0
            val_correct = 0
            val_total = 0

            with torch.no_grad():
                val_pbar = tqdm(val_loader, desc=f'Epoch {epoch}/{self.epochs} [Val]')
                # BUG FIX: the running-loss display previously divided by the
                # stale `batch_idx` left over from the training loop; count
                # the validation batches themselves instead.
                for val_idx, (inputs, targets) in enumerate(val_pbar):
                    inputs, targets = inputs.to(self.device), targets.to(self.device)

                    outputs = model(inputs)
                    loss = criterion(outputs, targets)

                    val_loss += loss.item()
                    _, predicted = outputs.max(1)
                    val_total += targets.size(0)
                    val_correct += predicted.eq(targets).sum().item()

                    # Running averages for the progress bar.
                    val_acc = 100. * val_correct / val_total
                    val_pbar.set_postfix({
                        'loss': f'{val_loss/(val_idx+1):.3f}',
                        'acc': f'{val_acc:.2f}%'
                    })

            # Epoch-level validation metrics.
            val_loss = val_loss / len(val_loader)
            val_acc = 100. * val_correct / val_total

            # Log scalars to TensorBoard.
            self.writer.add_scalar('Loss/train', train_loss, epoch)
            self.writer.add_scalar('Loss/val', val_loss, epoch)
            self.writer.add_scalar('Accuracy/train', train_acc, epoch)
            self.writer.add_scalar('Accuracy/val', val_acc, epoch)
            self.writer.add_scalar('Learning_Rate', optimizer.param_groups[0]['lr'], epoch)

            # Print the epoch summary table.
            epoch_info = [
                ["训练损失", f"{train_loss:.4f}"],
                ["训练准确率", f"{train_acc:.2f}%"],
                ["验证损失", f"{val_loss:.4f}"],
                ["验证准确率", f"{val_acc:.2f}%"],
                ["学习率", f"{optimizer.param_groups[0]['lr']:.6f}"]
            ]

            print(f"\nEpoch {epoch} 结果:")
            print(tabulate(epoch_info, headers=["指标", "数值"], tablefmt="grid"))

            # Checkpoint on validation-accuracy improvement.
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                checkpoint = {
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'scheduler_state_dict': scheduler.state_dict() if scheduler is not None else None,
                    'train_loss': train_loss,
                    'val_loss': val_loss,
                    'train_acc': train_acc,
                    'val_acc': val_acc
                }
                # Save into the per-run directory...
                torch.save(checkpoint, os.path.join(self.save_dir, 'best_model.pth'))
                # ...and mirror into the stable model folder.
                torch.save(checkpoint, os.path.join(self.model_dir, 'best_model.pth'))
                print(f"\n✨ 保存最佳模型到 {self.save_dir} 和 {self.model_dir} (验证准确率: {val_acc:.2f}%)")

            # Free cached memory once per epoch.
            gc.collect()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

        print("\n🎉 训练完成! 🎉")
        print(f"最佳验证准确率: {best_val_acc:.2f}%")
        print(f"模型和日志保存在: {self.save_dir}")

    def train_epoch(self, epoch, model, train_loader, criterion, optimizer, device):
        """Train for one epoch and return (mean_loss, accuracy_fraction).

        Alternative per-epoch entry point (used with ``validate`` and
        ``update_metrics``); ``train`` above is the self-contained loop.
        """
        model.train()
        total_loss = 0
        correct = 0
        total = 0

        pbar = tqdm(train_loader, desc=f"{Fore.CYAN}训练",
                   bar_format="{l_bar}{bar:30}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]",
                   ncols=100)

        # `step` counts batches processed so far, so the progress-bar loss is
        # a true running mean (previously it divided by the full loader
        # length, understating the loss until the last batch).
        for step, (images, labels) in enumerate(pbar, start=1):
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)

            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()

            # Running averages for the progress bar.
            pbar.set_postfix({
                'loss': f'{total_loss/step:.4f}',
                'acc': f'{100.*correct/total:.2f}%'
            })

        return total_loss/len(train_loader), correct/total

    def validate(self, model, val_loader, criterion, device):
        """Evaluate and return (mean_loss, accuracy_fraction); no grad."""
        model.eval()
        total_loss = 0
        correct = 0
        total = 0

        pbar = tqdm(val_loader, desc=f"{Fore.YELLOW}验证",
                   bar_format="{l_bar}{bar:30}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]",
                   ncols=100)

        with torch.no_grad():
            # As in train_epoch: divide by batches seen so far, not the full
            # loader length, so the displayed loss is a real running mean.
            for step, (images, labels) in enumerate(pbar, start=1):
                images, labels = images.to(device), labels.to(device)

                outputs = model(images)
                loss = criterion(outputs, labels)

                total_loss += loss.item()
                _, predicted = outputs.max(1)
                total += labels.size(0)
                correct += predicted.eq(labels).sum().item()

                # Running averages for the progress bar.
                pbar.set_postfix({
                    'loss': f'{total_loss/step:.4f}',
                    'acc': f'{100.*correct/total:.2f}%'
                })

        return total_loss/len(val_loader), correct/total

    def update_metrics(self, epoch, metrics):
        """Record one epoch's metrics, track the best model, and print them.

        Args:
            epoch: 0-based epoch index (displayed as epoch+1).
            metrics: dict with 'train_loss', 'val_loss', 'train_acc',
                'val_acc' (fractions in [0, 1]) and 'lr'. Keys matching
                ``self.metrics_history`` are appended to the history.
        """
        # Append known keys to the metric history.
        for key, value in metrics.items():
            if key in self.metrics_history:
                self.metrics_history[key].append(value)

        # Track the best model and the early-stopping counter.
        if metrics['val_acc'] > self.best_val_acc:
            self.best_val_acc = metrics['val_acc']
            self.best_epoch = epoch
            improved = f"{Fore.GREEN}✓ 最佳模型!{Style.RESET_ALL}"
            self.early_stop_counter = 0
        else:
            improved = ""
            self.early_stop_counter += 1

        # Print this epoch's metric table.
        epoch_data = [
            ["训练损失", f"{Fore.CYAN}{metrics['train_loss']:.4f}{Style.RESET_ALL}"],
            ["验证损失", f"{Fore.YELLOW}{metrics['val_loss']:.4f}{Style.RESET_ALL}"],
            ["训练准确率", f"{Fore.CYAN}{metrics['train_acc']*100:.2f}%{Style.RESET_ALL}"],
            ["验证准确率", f"{Fore.YELLOW}{metrics['val_acc']*100:.2f}%{Style.RESET_ALL}"],
            ["学习率", f"{metrics['lr']:.6f}"]
        ]

        print(f"\n{Fore.GREEN}{Style.BRIGHT}📈 第 {epoch+1} 轮指标 📈{Style.RESET_ALL} {improved}")
        print(tabulate(epoch_data, headers=["指标", "数值"], tablefmt="fancy_grid"))
        print(f"{Fore.YELLOW}最佳验证准确率: {self.best_val_acc*100:.2f}% (轮次 {self.best_epoch+1}){Style.RESET_ALL}")

    def plot_metrics(self):
        """Save loss/accuracy curves from the metric history as a PNG.

        NOTE(review): the Chinese axis labels require a CJK-capable
        matplotlib font to render; otherwise they show as boxes — confirm
        the deployment environment has one configured.
        """
        plt.figure(figsize=(12, 4))

        # Loss curves.
        plt.subplot(1, 2, 1)
        plt.plot(self.metrics_history['train_loss'], label='训练损失')
        plt.plot(self.metrics_history['val_loss'], label='验证损失')
        plt.title('损失曲线')
        plt.xlabel('轮次')
        plt.ylabel('损失')
        plt.legend()

        # Accuracy curves.
        plt.subplot(1, 2, 2)
        plt.plot(self.metrics_history['train_acc'], label='训练准确率')
        plt.plot(self.metrics_history['val_acc'], label='验证准确率')
        plt.title('准确率曲线')
        plt.xlabel('轮次')
        plt.ylabel('准确率')
        plt.legend()

        plt.tight_layout()
        plt.savefig(os.path.join(self.save_dir, 'training_metrics.png'))
        plt.close()

        print(f"\n{Fore.GREEN}✓ 训练指标图表已保存到: {self.save_dir}/training_metrics.png{Style.RESET_ALL}")

    def save_checkpoint(self, model, optimizer, epoch, metrics, is_best=False):
        """Save the latest checkpoint; mirror best models to ``model_dir``.

        Args:
            model / optimizer: their state_dicts are stored.
            epoch: current epoch index.
            metrics: metric dict stored alongside; 'val_acc' is a fraction.
            is_best: when True, additionally write ``best_model.pth`` to
                both the run directory and ``self.model_dir``.
        """
        checkpoint = {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'metrics': metrics,
            'best_val_acc': self.best_val_acc
        }

        # Always overwrite the rolling "latest" checkpoint.
        torch.save(checkpoint, os.path.join(self.save_dir, 'latest_checkpoint.pth'))

        # Best model gets its own copies.
        if is_best:
            torch.save(checkpoint, os.path.join(self.save_dir, 'best_model.pth'))
            torch.save(checkpoint, os.path.join(self.model_dir, 'best_model.pth'))
            print(f"{Fore.GREEN}✓ 保存最佳模型到 {self.save_dir} 和 {self.model_dir} (验证准确率: {metrics['val_acc']*100:.2f}%){Style.RESET_ALL}")

    def cleanup(self):
        """Close the TensorBoard writer and release cached memory."""
        if hasattr(self, 'writer'):
            self.writer.close()

        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
