import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from typing import List, Tuple
from bishe_situations.utils import PYSR_PARAMS, postprocess_params, history_to_kwargs, preprocess_params

class TimeEmbedding(nn.Module):
    """Embed scalar diffusion timesteps with a small two-layer SiLU MLP.

    Args:
        time_dim: width of the produced embedding vector.
    """

    def __init__(self, time_dim: int):
        super().__init__()
        self.time_dim = time_dim
        layers = [
            nn.Linear(1, time_dim),
            nn.SiLU(),
            nn.Linear(time_dim, time_dim),
        ]
        self.time_mlp = nn.Sequential(*layers)

    def forward(self, t):
        # Add a trailing feature axis so Linear(1, time_dim) can consume
        # the scalar timestep: [batch] -> [batch, 1] -> [batch, time_dim].
        t_col = t.unsqueeze(-1)
        return self.time_mlp(t_col)

class ResidualBlock(nn.Module):
    """1-D convolutional residual block with additive timestep conditioning.

    The timestep embedding is projected to the output channel count with a
    1x1 convolution and added to the intermediate feature map before the
    second 3x3 convolution.  A 1x1 convolution aligns channel counts on the
    skip path when in/out channels differ.
    """

    def __init__(self, in_channels: int, out_channels: int, time_dim: int):
        super().__init__()
        self.time_mlp = TimeEmbedding(time_dim)
        self.conv1 = nn.Conv1d(in_channels, out_channels, 3, padding=1)
        self.conv2 = nn.Conv1d(out_channels, out_channels, 3, padding=1)
        self.time_conv = nn.Conv1d(time_dim, out_channels, 1)
        # Identity skip when the channel counts already match.
        if in_channels == out_channels:
            self.shortcut = nn.Identity()
        else:
            self.shortcut = nn.Conv1d(in_channels, out_channels, 1)

    def forward(self, x, t):
        # Embed t and broadcast it over the spatial axis: [B, time_dim, L].
        emb = self.time_mlp(t).unsqueeze(-1).expand(-1, -1, x.shape[-1])

        out = F.silu(self.conv1(x))
        out = self.conv2(out + self.time_conv(emb))
        return out + self.shortcut(x)

class DiffusionModel(nn.Module):
    """U-Net-style 1-D network predicting a noise vector over the parameters.

    NOTE(review): the forward pass is conditioned only on the scalar MSE and
    the timestep -- the noisy parameter vector itself is never an input,
    which is unusual for a diffusion denoiser; confirm intent upstream.
    """

    def __init__(self, param_dim: int, time_dim: int = 256):
        super().__init__()
        self.time_dim = time_dim
        self.param_dim = param_dim

        # Encode the scalar MSE into a 64-channel feature.
        self.mse_encoder = nn.Sequential(
            nn.Linear(1, 64),
            nn.SiLU(),
            nn.Linear(64, 64),
        )

        # Encoder (downsampling) path.
        self.down1 = ResidualBlock(64, 128, time_dim)
        self.down2 = ResidualBlock(128, 256, time_dim)
        self.down3 = ResidualBlock(256, 512, time_dim)

        # Bottleneck.
        self.middle = ResidualBlock(512, 512, time_dim)

        # Decoder (upsampling) path; inputs are concatenated with the
        # matching encoder activation along the channel axis.
        self.up1 = ResidualBlock(512 + 512, 256, time_dim)
        self.up2 = ResidualBlock(256 + 256, 128, time_dim)
        self.up3 = ResidualBlock(128 + 128, 64, time_dim)

        # Project back to one channel per parameter.
        self.output_conv = nn.Conv1d(64, param_dim, 1)

    def forward(self, mse: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
        """mse: [batch, 1]; t: [batch] -> predicted noise [batch, param_dim]."""
        feat = self.mse_encoder(mse).unsqueeze(-1)  # [batch, 64, 1]

        # Encoder, keeping activations for the skip connections.
        d1 = self.down1(feat, t)  # [batch, 128, 1]
        d2 = self.down2(d1, t)    # [batch, 256, 1]
        d3 = self.down3(d2, t)    # [batch, 512, 1]

        mid = self.middle(d3, t)  # [batch, 512, 1]

        # Decoder with channel-wise concatenated skips.
        u = self.up1(torch.cat([mid, d3], dim=1), t)
        u = self.up2(torch.cat([u, d2], dim=1), t)
        u = self.up3(torch.cat([u, d1], dim=1), t)

        # [batch, param_dim, 1] -> [batch, param_dim]
        return self.output_conv(u).squeeze(-1)

class DiffusionTrainer:
    """Wrap a DiffusionModel with a linear beta schedule, training and sampling.

    NOTE(review): the underlying model predicts noise from (mse, t) only; the
    noisy parameters are never fed to the network, so this behaves as a
    conditional generator rather than a classic DDPM denoiser -- confirm intent.
    """

    def __init__(self,
                 param_dim: int,
                 n_steps: int = 1000,
                 beta_start: float = 1e-4,
                 beta_end: float = 0.02,
                 device: str = 'cuda'):
        self.device = device
        self.n_steps = n_steps
        self.param_dim = param_dim

        self.model = DiffusionModel(param_dim).to(device)

        # Linear beta schedule and cumulative alpha products (DDPM notation).
        self.beta = torch.linspace(beta_start, beta_end, n_steps).to(device)
        self.alpha = 1 - self.beta
        self.alpha_bar = torch.cumprod(self.alpha, dim=0)

    def add_noise(self, x: torch.Tensor, t: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Forward-diffuse x to step t; returns (noisy_x, the noise used)."""
        noise = torch.randn_like(x)
        alpha_bar_t = self.alpha_bar[t].view(-1, 1)
        return torch.sqrt(alpha_bar_t) * x + torch.sqrt(1 - alpha_bar_t) * noise, noise

    def remove_noise(self, x: torch.Tensor, t: torch.Tensor,
                     cond: torch.Tensor = None) -> torch.Tensor:
        """One reverse step: estimate x0 from x_t using the predicted noise.

        Args:
            x: current sample state, [batch, param_dim].
            t: timestep indices, [batch].
            cond: optional [batch, 1] conditioning input (the normalized MSE)
                fed to the model.  BUGFIX: previously x itself was passed to
                the model, but the model's encoder is Linear(1, 64) and
                expects a [batch, 1] MSE -- that crashed whenever
                param_dim != 1 and was inconsistent with train_step, which
                conditions on the MSE.  When cond is None the old behavior
                is kept for backward compatibility.
        """
        alpha_bar_t = self.alpha_bar[t].view(-1, 1)
        model_input = x if cond is None else cond
        predicted_noise = self.model(model_input, t)
        return (x - torch.sqrt(1 - alpha_bar_t) * predicted_noise) / torch.sqrt(alpha_bar_t)

    def train_step(self, mse: torch.Tensor, params: torch.Tensor,
                   optimizer: torch.optim.Optimizer) -> float:
        """One optimization step; returns the scalar loss value."""
        optimizer.zero_grad()

        # Sample a random timestep per example.
        t = torch.randint(0, self.n_steps, (mse.shape[0],), device=self.device)

        # Forward-diffuse the target parameters.
        # NOTE(review): noisy_params is computed but never shown to the
        # model -- the network predicts the noise from (mse, t) alone.
        noisy_params, noise = self.add_noise(params, t)

        # Predict the noise conditioned on the MSE and timestep.
        predicted_noise = self.model(mse, t)

        loss = F.mse_loss(predicted_noise, noise)
        loss.backward()
        optimizer.step()

        return loss.item()

    def sample(self, mse: torch.Tensor) -> torch.Tensor:
        """Generate a [batch, param_dim] parameter sample conditioned on mse ([batch, 1])."""
        self.model.eval()
        with torch.no_grad():
            # Start from standard Gaussian noise in parameter space.
            x = torch.randn_like(mse.expand(-1, self.param_dim))

            # Iteratively denoise, conditioning the model on the MSE
            # (matching how the model is called during training).
            for t in reversed(range(self.n_steps)):
                t_batch = torch.full((mse.shape[0],), t, device=self.device)
                x = self.remove_noise(x, t_batch, cond=mse)

        self.model.train()
        return x

def train_diffusion_model(train_data: List[Tuple[float, dict]],
                         n_epochs: int = 1000,
                         batch_size: int = 32,
                         learning_rate: float = 1e-4,
                         device: str = 'cuda'):
    """Train a DiffusionTrainer on (mse, param-dict) pairs.

    Args:
        train_data: list of (mse, params) pairs; every params dict must have
            the same keys in the same order.
        n_epochs / batch_size / learning_rate / device: usual training knobs.

    Returns:
        (trainer, mse_mean, mse_std, param_mean, param_std) -- the statistics
        are needed to normalize/denormalize at prediction time.

    Raises:
        ValueError: if train_data is empty.
    """
    if not train_data:
        raise ValueError("train_data must contain at least one (mse, params) pair")

    # Stack the training pairs into tensors.
    mse_values = torch.tensor([mse for mse, _ in train_data], device=device)
    param_values = torch.tensor([list(params.values()) for _, params in train_data],
                                device=device)

    # Normalization statistics.  BUGFIX: std can be 0 (constant values) or
    # NaN (a single sample, unbiased std); clamp away from zero so the
    # divisions below cannot produce inf/NaN.
    eps = 1e-8
    mse_mean = mse_values.mean()
    mse_std = torch.nan_to_num(mse_values.std(), nan=0.0).clamp_min(eps)
    param_mean = param_values.mean(dim=0)
    param_std = torch.nan_to_num(param_values.std(dim=0), nan=0.0).clamp_min(eps)

    # BUGFIX: the model's MSE encoder is Linear(1, 64) and expects a
    # [N, 1] input (see DiffusionModel.forward); the 1-D [N] tensor used
    # previously fails the Linear shape check.
    mse_normalized = ((mse_values - mse_mean) / mse_std).unsqueeze(-1)
    param_normalized = (param_values - param_mean) / param_std

    # Model and optimizer.
    trainer = DiffusionTrainer(param_dim=param_values.shape[1], device=device)
    optimizer = torch.optim.Adam(trainer.model.parameters(), lr=learning_rate)

    # Training loop.
    for epoch in range(n_epochs):
        total_loss = 0.0
        n_batches = 0

        # Shuffle the sample order each epoch.
        indices = torch.randperm(len(train_data))

        for i in range(0, len(train_data), batch_size):
            batch_indices = indices[i:i + batch_size]
            batch_mse = mse_normalized[batch_indices]
            batch_params = param_normalized[batch_indices]

            total_loss += trainer.train_step(batch_mse, batch_params, optimizer)
            n_batches += 1

        # Report progress every 100 epochs.
        if (epoch + 1) % 100 == 0:
            avg_loss = total_loss / n_batches
            print(f"Epoch {epoch+1}/{n_epochs}, Loss: {avg_loss:.4f}")

    return trainer, mse_mean, mse_std, param_mean, param_std

def predict_params(trainer: DiffusionTrainer,
                  mse: float,
                  mse_mean: float,
                  mse_std: float,
                  param_mean: torch.Tensor,
                  param_std: torch.Tensor,
                  n_samples: int = 10) -> List[dict]:
    """Sample n_samples candidate parameter dicts conditioned on an MSE value.

    The MSE is normalized with the training statistics, the trained diffusion
    sampler generates normalized parameter vectors, and each vector is
    denormalized and keyed by the entries of PYSR_PARAMS.

    BUGFIX: the return annotation previously said ``dict`` but the function
    has always returned a list of dicts (one per sample).

    NOTE(review): assumes PYSR_PARAMS iteration order matches the column
    order used when building the training tensors -- confirm upstream.
    """
    # Normalize the conditioning MSE with the training statistics.
    mse_normalized = (mse - mse_mean) / mse_std
    mse_tensor = torch.tensor([[mse_normalized]], device=trainer.device).expand(n_samples, 1)

    # Generate normalized parameter vectors.
    param_normalized = trainer.sample(mse_tensor)

    # Undo the training-time normalization.
    param_denormalized = param_normalized * param_std + param_mean

    # Convert each sample row into a {param_name: value} dict.
    param_dicts = []
    for row in param_denormalized:
        param_dicts.append({key: row[i].item()
                            for i, key in enumerate(PYSR_PARAMS)})
    return param_dicts

if __name__ == "__main__":
    # Example usage.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Toy training data: (mse, parameter-dict) pairs.
    train_data = [
        (0.1, {'param1': 0.5, 'param2': 0.3}),
        (0.2, {'param1': 0.6, 'param2': 0.4}),
        # ... more data
    ]

    # Fit the diffusion model and keep the normalization statistics.
    trainer, mse_mean, mse_std, param_mean, param_std = train_diffusion_model(
        train_data,
        n_epochs=1000,
        batch_size=32,
        device=device,
    )

    # Generate candidate parameters for an unseen MSE value.
    new_mse = 0.15
    predicted_params = predict_params(
        trainer,
        new_mse,
        mse_mean,
        mse_std,
        param_mean,
        param_std,
        n_samples=10,
    )

    print("预测的参数:")
    for i, params in enumerate(predicted_params):
        print(f"样本 {i+1}:", params)