import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import numpy as np

# Import the previously fixed display-aware network as the teacher model.
try:
    from display_aware_network import DisplayAwareRRDBNet
except ImportError:
    print("Warning: display_aware_network not found. Using a dummy teacher model.")
    # Fallback: define a minimal stand-in teacher so the rest of the script still runs.
    class DisplayAwareRRDBNet(nn.Module):
        def __init__(self):
            super().__init__()
            # Single 3x3 conv keeping 3 channels — a placeholder, not a real SR teacher.
            self.conv = nn.Conv2d(3, 3, 3, padding=1)
        
        def forward(self, x):
            return self.conv(x)

class DistillationLoss(nn.Module):
    """Knowledge-distillation loss combining a hard-target and a soft-teacher term.

    total = alpha * soft + (1 - alpha) * hard, where
        hard = MSE(student, ground_truth)
        soft = MSE(student, teacher_output)

    NOTE: with an MSE soft loss the classic temperature trick is a mathematical
    no-op: ``MSE(s/T, t/T) * T**2 == MSE(s, t)`` exactly, so the original
    divide-then-rescale computation only added rounding noise and has been
    removed.  ``temperature`` is kept for backward API compatibility — it would
    only matter for a KL-divergence loss on logits.
    """

    def __init__(self, alpha=0.7, temperature=4.0):
        super().__init__()
        self.alpha = alpha  # weight of the soft (teacher) term
        self.temperature = temperature  # no effect with MSE; kept for API compat
        self.mse_loss = nn.MSELoss()

    def forward(self, student_outputs, teacher_outputs, targets):
        """Return ``(total_loss, hard_loss, soft_loss)`` as scalar tensors."""
        # Hard-label loss: student vs. ground truth.
        hard_loss = self.mse_loss(student_outputs, targets)

        # Soft-label loss: student vs. teacher output.  Temperature scaling
        # cancels exactly for MSE (see class docstring), so compute directly.
        soft_loss = self.mse_loss(student_outputs, teacher_outputs)

        # Weighted combination of the two terms.
        total_loss = self.alpha * soft_loss + (1 - self.alpha) * hard_loss

        return total_loss, hard_loss, soft_loss

class LightweightRRDBNet(nn.Module):
    """Compact Real-ESRGAN-style student network (4x nearest-neighbour upscaling).

    A shallow stack of channel-growing residual blocks, a fusion conv back to
    ``num_feat`` channels, then two x2 upsampling stages and a reconstruction
    head.
    """

    def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=32, num_block=8, num_grow_ch=16):
        super().__init__()

        # Reduced width / depth relative to the full teacher network.
        self.num_feat = num_feat
        self.num_block = num_block

        # Shallow feature extraction.
        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)

        # Channel-growing trunk: block i consumes num_feat + i * num_grow_ch
        # channels and appends num_grow_ch more via concatenation.
        self.body = nn.ModuleList(
            LightweightResidualBlock(num_feat + i * num_grow_ch, num_grow_ch)
            for i in range(num_block)
        )

        # Fuse the grown feature stack back down to num_feat channels.
        self.conv_body = nn.Conv2d(num_feat + num_block * num_grow_ch, num_feat, 3, 1, 1)

        # Two x2 upsampling stages plus the HR reconstruction head.
        self.upconv1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.upconv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        """Map a low-resolution batch to a 4x-upscaled output."""
        features = self.lrelu(self.conv_first(x))

        # Trunk: each block concatenates its new feature channels.
        for residual_block in self.body:
            features = residual_block(features)

        features = self.lrelu(self.conv_body(features))

        # x4 total upscale, done as two nearest-neighbour x2 hops.
        for up_conv in (self.upconv1, self.upconv2):
            features = self.lrelu(up_conv(F.interpolate(features, scale_factor=2, mode='nearest')))

        return self.conv_last(self.lrelu(self.conv_hr(features)))

class LightweightResidualBlock(nn.Module):
    """Two-conv block that appends ``grow_channels`` new feature channels.

    The input passes through untouched; freshly computed feature maps are
    concatenated onto it, so the output always has
    ``in_channels + grow_channels`` channels (dense-style channel growth).
    """

    def __init__(self, in_channels, grow_channels):
        super().__init__()
        # Project the (possibly wide) input down to the growth width...
        self.conv1 = nn.Conv2d(in_channels, grow_channels, 3, 1, 1)
        # ...then refine within the growth width.
        self.conv2 = nn.Conv2d(grow_channels, grow_channels, 3, 1, 1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        new_features = self.conv2(self.lrelu(self.conv1(x)))
        # Dense connection: keep all original channels, append the new ones.
        return torch.cat([x, new_features], dim=1)

def distill_model(teacher_model, train_loader, val_loader, epochs=100, device='cuda'):
    """Train a lightweight student network to mimic ``teacher_model``.

    Args:
        teacher_model: pre-trained network producing soft targets; kept frozen
            in eval mode for the whole run.
        train_loader: iterable yielding ``(lr_imgs, hr_imgs)`` training batches.
        val_loader: iterable yielding ``(lr_imgs, hr_imgs)`` validation batches.
        epochs: number of training epochs.
        device: torch device string to train on.

    Returns:
        The trained ``LightweightRRDBNet`` student model.
    """
    # Student: reduced feature width / block count versus the teacher.
    student_model = LightweightRRDBNet(
        num_feat=32,
        num_block=8,
        num_grow_ch=16
    ).to(device)

    # Distillation loss, optimizer, and step-decay LR schedule.
    criterion = DistillationLoss(alpha=0.7, temperature=4.0)
    optimizer = torch.optim.Adam(student_model.parameters(), lr=1e-4)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.5)

    teacher_model.eval()

    print("开始知识蒸馏训练...")
    for epoch in range(epochs):
        student_model.train()
        running_loss = 0.0
        num_batches = 0

        for batch_idx, (lr_imgs, hr_imgs) in enumerate(train_loader):
            lr_imgs = lr_imgs.to(device)
            hr_imgs = hr_imgs.to(device)

            # Teacher prediction — no gradients needed for the frozen teacher.
            with torch.no_grad():
                teacher_outputs = teacher_model(lr_imgs)

            # Student prediction.
            student_outputs = student_model(lr_imgs)

            # Combined distillation loss (soft teacher + hard ground-truth terms).
            total_loss, hard_loss, soft_loss = criterion(
                student_outputs, teacher_outputs, hr_imgs
            )

            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

            running_loss += total_loss.item()
            num_batches += 1

            if batch_idx % 100 == 0:
                print(f'Epoch [{epoch+1}/{epochs}], Batch [{batch_idx}/{len(train_loader)}], '
                      f'Loss: {total_loss.item():.4f}, Hard: {hard_loss.item():.4f}, Soft: {soft_loss.item():.4f}')

        scheduler.step()

        # FIX: running_loss was accumulated but never reported; log the epoch mean.
        if num_batches > 0:
            print(f'Epoch [{epoch+1}/{epochs}], Avg Loss: {running_loss / num_batches:.4f}')

        # Periodic validation every 10 epochs.
        if (epoch + 1) % 10 == 0:
            val_psnr = validate_model(student_model, val_loader, device)
            print(f'Epoch [{epoch+1}/{epochs}], Val PSNR: {val_psnr:.2f}dB')

    return student_model

def validate_model(model, val_loader, device):
    """Evaluate ``model`` on ``val_loader`` and return the mean PSNR in dB.

    The PSNR formula uses a peak signal of 1.0, i.e. image tensors are
    presumed to be scaled to [0, 1].
    """
    model.eval()
    psnr_values = []

    with torch.no_grad():
        for lr_batch, hr_batch in val_loader:
            lr_batch = lr_batch.to(device)
            hr_batch = hr_batch.to(device)

            prediction = model(lr_batch)

            # PSNR for a unit peak signal: 20 * log10(1 / sqrt(MSE)).
            batch_mse = F.mse_loss(prediction, hr_batch)
            batch_psnr = 20 * torch.log10(1.0 / torch.sqrt(batch_mse))
            psnr_values.append(batch_psnr.item())

    return np.mean(psnr_values)

# Model size statistics.
def count_parameters(model):
    """Count the trainable parameters of ``model``.

    Parameters with ``requires_grad=False`` (frozen) are excluded.
    """
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total

if __name__ == "__main__":
    # Smoke-test the distillation setup: report model sizes only.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Instantiate teacher and student purely for parameter counting.
    teacher_model = DisplayAwareRRDBNet()
    student_model = LightweightRRDBNet()

    teacher_params = count_parameters(teacher_model)
    student_params = count_parameters(student_model)
    compression = teacher_params / student_params

    print(f"教师模型参数量: {teacher_params/1e6:.2f}M")
    print(f"学生模型参数量: {student_params/1e6:.2f}M")
    print(f"压缩比例: {compression:.2f}x")

    # A full distillation run needs real datasets; see distill_model().
    print("注意：要执行完整的知识蒸馏，需要准备实际的训练和验证数据加载器。")
    print("可以取消下面的注释并提供相应的数据加载器：")
    # train_loader, val_loader = ...  # plug in real DataLoader instances here
    # student_model = distill_model(teacher_model, train_loader, val_loader, epochs=50, device=device)