import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import sys
import os

# Add the parent directory to the import path so the shared `moudle` package resolves
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from moudle import (
    create_data_loaders, 
    Trainer, 
    setup_device, 
    load_label_mapping,
    TensorBoardLogger,
    YAMLConfig,
    load_model
)


class Inception(nn.Module):
    """Inception block: four parallel branches concatenated along channels.

    Branches: (1) 1x1 conv; (2) 1x1 reduce -> 3x3 conv; (3) 1x1 reduce ->
    5x5 conv; (4) 3x3 max-pool -> 1x1 projection. Every convolution is
    followed by BatchNorm and ReLU. Spatial size is preserved; the output
    has ch1x1 + ch3x3 + ch5x5 + pool_proj channels.
    """

    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super(Inception, self).__init__()

        def conv_bn_relu(c_in, c_out, **conv_kwargs):
            # Conv -> BN -> ReLU unit shared by all branches (returned as a
            # list so branches keep the same Sequential indices as before).
            return [
                nn.Conv2d(c_in, c_out, **conv_kwargs),
                nn.BatchNorm2d(c_out),
                nn.ReLU(True),
            ]

        # Branch 1: plain 1x1 convolution.
        self.branch1 = nn.Sequential(*conv_bn_relu(in_channels, ch1x1, kernel_size=1))

        # Branch 2: 1x1 channel reduction, then 3x3 convolution.
        self.branch2 = nn.Sequential(
            *conv_bn_relu(in_channels, ch3x3red, kernel_size=1),
            *conv_bn_relu(ch3x3red, ch3x3, kernel_size=3, padding=1),
        )

        # Branch 3: 1x1 channel reduction, then 5x5 convolution.
        self.branch3 = nn.Sequential(
            *conv_bn_relu(in_channels, ch5x5red, kernel_size=1),
            *conv_bn_relu(ch5x5red, ch5x5, kernel_size=5, padding=2),
        )

        # Branch 4: 3x3 max-pool (stride 1 keeps the size), then 1x1 projection.
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            *conv_bn_relu(in_channels, pool_proj, kernel_size=1),
        )

    def forward(self, x):
        # Feed the same input through every branch and stack along channels.
        branches = (self.branch1, self.branch2, self.branch3, self.branch4)
        return torch.cat([branch(x) for branch in branches], 1)


class InceptionAux(nn.Module):
    """Auxiliary classifier head attached to an intermediate Inception stage.

    Reduces the feature map with a 5x5/stride-3 average pool and a 1x1 conv
    to 128 channels, then classifies with a two-layer MLP. The 2048-unit MLP
    input assumes a 4x4 spatial map after the pool (i.e. a 14x14 feature map,
    as produced by a 224x224 network input) -- TODO confirm for other sizes.
    """

    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        # Pool + 1x1 projection to 128 channels.
        self.conv = nn.Sequential(
            nn.AvgPool2d(kernel_size=5, stride=3),
            nn.Conv2d(in_channels, 128, kernel_size=1),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
        )
        # Classifier MLP; heavy dropout (0.7) regularizes the auxiliary head.
        self.fc = nn.Sequential(
            nn.Linear(2048, 1024),
            nn.ReLU(True),
            nn.Dropout(0.7),
            nn.Linear(1024, num_classes),
        )

    def forward(self, x):
        features = self.conv(x)
        flat = features.view(features.size(0), -1)
        return self.fc(flat)


class GoogLeNet(nn.Module):
    """GoogLeNet (Inception v1) classifier.

    Two convolutional stem stages, nine Inception blocks (3a-3b, 4a-4e,
    5a-5b), adaptive average pooling, dropout, and one linear head.

    In training mode with ``aux_logits=True``, ``forward`` returns the tuple
    ``(logits, aux2_logits, aux1_logits)``; otherwise it returns only
    ``logits`` of shape (batch, num_classes).

    Args:
        num_classes: number of output classes.
        aux_logits: attach the two auxiliary classifiers (after 4a and 4d).
        dropout: dropout probability before the final linear layer.
    """
    
    def __init__(self, num_classes=100, aux_logits=True, dropout=0.2):
        super(GoogLeNet, self).__init__()
        self.aux_logits = aux_logits
        
        # Stem stage 1: 7x7 conv (stride 2) + max-pool -> 64 channels.
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        
        # Stem stage 2: 1x1 then 3x3 conv + max-pool -> 192 channels.
        self.conv2 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=1),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.Conv2d(64, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        
        # Inception blocks (args: in, 1x1, 3x3red, 3x3, 5x5red, 5x5, pool_proj).
        self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        
        self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        
        self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)
        
        # Auxiliary classifiers, fed from the 4a (512-ch) and 4d (528-ch) outputs.
        if self.aux_logits:
            self.aux1 = InceptionAux(512, num_classes)
            self.aux2 = InceptionAux(528, num_classes)
        
        # Global average pool + dropout + linear head (1024 = 5b output channels).
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(1024, num_classes)
        
        # Initialize all weights.
        self._initialize_weights()
    
    def _initialize_weights(self):
        """Kaiming init for convs, constant init for BN, small normal for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
    
    def forward(self, x):
        # Stem stages.
        x = self.conv1(x)
        x = self.conv2(x)
        
        # Inception 3a, 3b.
        x = self.inception3a(x)
        x = self.inception3b(x)
        x = self.maxpool3(x)
        
        # Inception 4a-4e; aux heads are evaluated in training mode only.
        x = self.inception4a(x)
        aux1 = None
        if self.aux_logits and self.training:
            aux1 = self.aux1(x)
        
        x = self.inception4b(x)
        x = self.inception4c(x)
        x = self.inception4d(x)
        aux2 = None
        if self.aux_logits and self.training:
            aux2 = self.aux2(x)
        
        x = self.inception4e(x)
        x = self.maxpool4(x)
        
        # Inception 5a, 5b.
        x = self.inception5a(x)
        x = self.inception5b(x)
        
        # Classification head.
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.dropout(x)
        x = self.fc(x)
        
        # (main, aux2, aux1) ordering matches torchvision's GoogLeNet.
        if self.aux_logits and self.training:
            return x, aux2, aux1
        return x


class GoogLeNetTrainer(Trainer):
    """Trainer specialized for GoogLeNet.

    In training mode the model returns three outputs (main, aux2, aux1);
    this trainer combines them as ``main_loss + aux_weight * (aux2_loss +
    aux1_loss)`` and reports accuracy from the main head only.
    """

    def __init__(self, *args, aux_weight=0.3, **kwargs):
        """
        Args:
            aux_weight: weight applied to each auxiliary loss. Defaults to
                0.3, the value used by the original GoogLeNet paper.
            *args, **kwargs: forwarded unchanged to the base ``Trainer``.
        """
        super().__init__(*args, **kwargs)
        self.aux_weight = aux_weight

    def train_epoch(self, train_loader, epoch):
        """Run one training epoch.

        Returns:
            (epoch_loss, epoch_acc): mean combined loss per batch and the
            epoch accuracy in percent (main classifier head only).
        """
        self.model.train()
        running_loss = 0.0
        running_corrects = 0
        total_samples = 0

        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(self.device), target.to(self.device)

            # Forward pass.
            self.optimizer.zero_grad()

            # With aux_logits in training mode the model returns
            # (main, aux2, aux1); fold the auxiliary losses into the total.
            if self.model.aux_logits and self.model.training:
                outputs, aux2, aux1 = self.model(data)
                loss = (self.criterion(outputs, target)
                        + self.aux_weight * self.criterion(aux2, target)
                        + self.aux_weight * self.criterion(aux1, target))
            else:
                outputs = self.model(data)
                loss = self.criterion(outputs, target)

            # Backward pass and parameter update.
            loss.backward()
            self.optimizer.step()

            # Accumulate metrics (accuracy uses the main head only).
            running_loss += loss.item()
            batch_acc = self.calculate_accuracy(outputs, target)
            running_corrects += batch_acc * target.size(0)
            total_samples += target.size(0)

            # Per-batch TensorBoard logging.
            if self.tensorboard_logger:
                global_step = epoch * len(train_loader) + batch_idx
                self.tensorboard_logger.log_training_batch(global_step, loss.item(), batch_acc * 100)

            # Periodic console progress.
            if batch_idx % self.print_freq == 0:
                current_acc = 100. * running_corrects / total_samples
                print(f'  批次 [{batch_idx}/{len(train_loader)}], '
                      f'损失: {loss.item():.4f}, '
                      f'准确率: {current_acc:.2f}%')

        epoch_loss = running_loss / len(train_loader)
        epoch_acc = 100. * running_corrects / total_samples
        return epoch_loss, epoch_acc

    def calculate_accuracy(self, outputs, targets):
        """Return top-1 accuracy as a fraction in [0, 1] for one batch."""
        total = targets.size(0)
        if total == 0:
            return 0.0  # guard: avoid ZeroDivisionError on an empty batch
        # detach() instead of the deprecated .data attribute to leave autograd.
        predicted = outputs.detach().argmax(dim=1)
        correct = (predicted == targets).sum().item()
        return correct / total


def create_googlenet_config():
    """Return a YAMLConfig pre-populated with the GoogLeNet training defaults."""
    config = YAMLConfig()

    # Apply the GoogLeNet-specific defaults in one shot.
    config.update({
        # Dataset locations and loading.
        'data_dir': '../dataset/Mini-ImageNet-Dataset',
        'label_file': '../dataset/label.json',
        'batch_size': 256,  # GoogLeNet is light enough for a large batch
        'num_workers': 0,
        'pin_memory': True,
        'img_size': 224,
        'num_classes': 100,

        # Input normalization statistics.
        'normalize_mean': [0.485, 0.456, 0.406],
        'normalize_std': [0.229, 0.224, 0.225],

        # Optimization.
        'num_epochs': 100,
        'lr': 0.01,  # a relatively large LR works well for GoogLeNet
        'momentum': 0.9,
        'weight_decay': 1e-4,
        'print_freq': 50,

        # LR schedule: decay by `gamma` every `step_size` epochs.
        'scheduler_type': 'StepLR',
        'step_size': 30,
        'gamma': 0.1,

        # Output paths.
        'log_dir': './logs/googlenet',
        'model_save_path': './model/googlenet_model.pth',
        'save_best': True,

        # Model hyperparameters.
        'model_name': 'GoogLeNet',
        'dropout': 0.2,
        'aux_logits': True,  # enable the auxiliary classifiers
    })
    return config


def main():
    """Entry point: configure, train, and evaluate GoogLeNet end to end.

    Pipeline: load or create the YAML config -> select device -> build data
    loaders -> build model/criterion/optimizer/scheduler -> train with the
    GoogLeNet-specific trainer -> reload the best checkpoint and test ->
    log final results and persist the config.
    """
    print("=== GoogLeNet (Inception v1) 训练 ===")
    
    # 1. Create the default configuration.
    config = create_googlenet_config()
    
    # If a YAML config file exists next to this script, it replaces the defaults.
    config_file = os.path.join(os.path.dirname(__file__), 'config.yaml')
    if os.path.exists(config_file):
        print(f"发现配置文件 {config_file}，加载中...")
        config = YAMLConfig(config_file)
    else:
        print("使用默认配置")
        # Persist the defaults so the next run loads them from disk.
        config.save_config(config_file)
        print(f"默认配置已保存到 {config_file}")
    
    # Print a summary of the active configuration.
    print("\n=== 当前配置 ===")
    print(f"模型: {config.get('model_name')}")
    print(f"批次大小: {config.get('batch_size')}")
    print(f"学习率: {config.get('lr')}")
    print(f"训练轮数: {config.get('num_epochs')}")
    print(f"图像尺寸: {config.get('img_size')}")
    print(f"Dropout: {config.get('dropout')}")
    print(f"辅助分类器: {config.get('aux_logits')}")
    print("=" * 20)
    
    # 2. Select the compute device.
    device = setup_device()
    
    # 3. Build the train/val/test data loaders.
    print("\n加载数据集...")
    train_loader, val_loader, test_loader, train_dataset = create_data_loaders(
        data_dir=config.get('data_dir'),
        batch_size=config.get('batch_size'),
        num_workers=config.get('num_workers'),
        pin_memory=config.get('pin_memory'),
        img_size=config.get('img_size'),
        mean=config.get('normalize_mean'),
        std=config.get('normalize_std')
    )
    
    print(f"训练集大小: {len(train_dataset)}")
    print(f"训练批次数: {len(train_loader)}")
    print(f"验证批次数: {len(val_loader)}")
    print(f"测试批次数: {len(test_loader)}")
    
    # 4. Load the class-index -> label-name mapping.
    label_mapping = load_label_mapping(config.get('label_file'))
    
    # 5. Instantiate the model.
    print("\n创建GoogLeNet模型...")
    model = GoogLeNet(
        num_classes=config.get('num_classes'),
        aux_logits=config.get('aux_logits'),
        dropout=config.get('dropout')
    )
    
    # 6. Loss function and SGD optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(
        model.parameters(), 
        lr=config.get('lr'), 
        momentum=config.get('momentum'), 
        weight_decay=config.get('weight_decay')
    )
    
    # 7. Optional step-decay learning-rate scheduler.
    if config.get('scheduler_type') == 'StepLR':
        scheduler = optim.lr_scheduler.StepLR(
            optimizer, 
            step_size=config.get('step_size'), 
            gamma=config.get('gamma')
        )
    else:
        scheduler = None
    
    # 8. TensorBoard logger.
    tensorboard_logger = TensorBoardLogger(
        log_dir=config.get('log_dir'),
        model_name=config.get('model_name', 'googlenet'),
        comment='inception_v1'
    )
    
    # 9. Build the trainer (GoogLeNet-specific: handles the auxiliary losses).
    trainer = GoogLeNetTrainer(
        model=model,
        device=device,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        tensorboard_logger=tensorboard_logger,
        model_save_path=config.get('model_save_path'),
        print_freq=config.get('print_freq')
    )
    
    # 10. Train.
    training_history = trainer.train(
        train_loader=train_loader,
        val_loader=val_loader,
        num_epochs=config.get('num_epochs'),
        save_best=config.get('save_best')
    )
    
    # 11. Reload the best checkpoint and evaluate on the test set.
    print('\n加载最佳模型进行测试...')
    best_model_path = config.get('model_save_path').replace('.pth', '_best.pth')
    model = load_model(model, best_model_path, device)
    
    # Disable the auxiliary classifiers for inference.
    model.aux_logits = False
    model.eval()
    
    # A plain Trainer suffices for evaluation (no aux-loss handling needed).
    test_trainer = Trainer(model=model, device=device)
    test_accuracy = test_trainer.test(
        test_loader=test_loader,
        train_dataset=train_dataset,
        label_mapping=label_mapping,
        num_classes=config.get('num_classes')
    )
    
    # 12. Record the final metrics to TensorBoard.
    tensorboard_logger.log_test_results(test_accuracy)
    tensorboard_logger.log_final_summary(
        training_time=training_history['training_time'],
        best_val_acc=training_history['best_val_acc'],
        test_acc=test_accuracy,
        model_path=config.get('model_save_path')
    )
    
    # 13. Save the final (possibly merged) configuration.
    final_config_path = os.path.join(config.get('log_dir'), 'final_config.yaml')
    config.save_config(final_config_path)
    print(f"\n最终配置已保存至: {final_config_path}")
    
    # 14. Print a final summary.
    print(f'\n=== GoogLeNet 训练总结 ===')
    print(f'训练时间: {training_history["training_time"]:.2f} 秒')
    print(f'最佳验证准确率: {training_history["best_val_acc"]:.2f}%')
    print(f'最终测试准确率: {test_accuracy:.2f}%')
    print(f'模型已保存至: {config.get("model_save_path")}')
    print('=' * 35)
    
    # 15. Print the TensorBoard launch command and close the logger.
    tensorboard_logger.print_tensorboard_command()
    tensorboard_logger.close()


# Script entry point.
if __name__ == '__main__':
    main()
