import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from typing import List, Tuple

class ParamDecoder(nn.Module):
    """MLP that maps a scalar MSE value to `param_dim` values in [0, 1].

    The scalar is first embedded by `mse_encoder`, then decoded into the
    parameter vector by `param_decoder`; the final Sigmoid bounds every
    output to the unit interval.
    """

    def __init__(self, param_dim: int, hidden_dim: int = 128):
        super().__init__()
        self.param_dim = param_dim

        # Embed the scalar MSE into a hidden representation.
        self.mse_encoder = nn.Sequential(
            nn.Linear(1, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
        )

        # Decode the hidden representation into the parameter vector;
        # Sigmoid keeps each component inside [0, 1].
        self.param_decoder = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, param_dim),
            nn.Sigmoid(),
        )

    def forward(self, mse: torch.Tensor) -> torch.Tensor:
        """mse: [batch_size, 1] -> params: [batch_size, param_dim]."""
        hidden = self.mse_encoder(mse)
        return self.param_decoder(hidden)

class ParamPredictor:
    """Owns a ParamDecoder and exposes one-step training and inference.

    Attributes:
        device: device string the model lives on (e.g. 'cuda' or 'cpu').
        model: the underlying ParamDecoder network.
        param_dim: width of the predicted parameter vector.
    """

    def __init__(self, param_dim: int, device: str = 'cuda'):
        self.device = device
        self.model = ParamDecoder(param_dim).to(device)
        self.param_dim = param_dim

    def train_step(self, mse: torch.Tensor, params: torch.Tensor,
                   optimizer: torch.optim.Optimizer) -> float:
        """Run one optimization step and return the scalar MSE loss.

        Args:
            mse: input tensor, shape [batch_size, 1].
            params: target parameter tensor, shape [batch_size, param_dim].
            optimizer: optimizer over `self.model.parameters()`.
        """
        # Ensure train mode even if predict() was called in between.
        self.model.train()
        optimizer.zero_grad()

        # Predict parameters and regress them onto the targets.
        predicted_params = self.model(mse)
        loss = F.mse_loss(predicted_params, params)

        # Backpropagate and update the weights.
        loss.backward()
        optimizer.step()

        return loss.item()

    def predict(self, mse: torch.Tensor) -> torch.Tensor:
        """Predict parameters for `mse` without tracking gradients.

        Fix: the original `self.model.train()` after the `return` was
        unreachable, so the model was left in eval mode forever after the
        first call; the previous mode is now restored via try/finally.
        """
        was_training = self.model.training
        self.model.eval()
        try:
            with torch.no_grad():
                return self.model(mse)
        finally:
            if was_training:
                self.model.train()

def train_param_predictor(train_data: List[Tuple[float, dict]],
                         n_epochs: int = 1000,
                         batch_size: int = 32,
                         learning_rate: float = 1e-4,
                         device: str = 'cuda'):
    """Fit a ParamPredictor on (mse, params-dict) pairs and return it.

    Args:
        train_data: list of (mse, params) pairs; each params dict maps
            parameter names to float values (insertion order defines the
            output dimension order).
        n_epochs: number of passes over the data.
        batch_size: minibatch size.
        learning_rate: Adam learning rate.
        device: device to train on.
    """
    # Stack the training pairs into device tensors.
    mse_values = torch.tensor([pair[0] for pair in train_data], device=device).float()
    param_values = torch.tensor([list(pair[1].values()) for pair in train_data], device=device).float()

    # Build the model and its optimizer.
    predictor = ParamPredictor(param_dim=param_values.shape[1], device=device)
    optimizer = torch.optim.Adam(predictor.model.parameters(), lr=learning_rate)

    n_examples = len(train_data)
    for epoch in range(n_epochs):
        epoch_loss = 0.0
        batch_count = 0

        # Visit the examples in a fresh random order each epoch.
        order = torch.randperm(n_examples)

        for start in range(0, n_examples, batch_size):
            chosen = order[start:start + batch_size]

            batch_mse = mse_values[chosen].unsqueeze(-1)  # [batch_size, 1]
            batch_params = param_values[chosen]

            epoch_loss += predictor.train_step(batch_mse, batch_params, optimizer)
            batch_count += 1

        # Report progress every 100 epochs.
        if (epoch + 1) % 100 == 0:
            avg_loss = epoch_loss / batch_count
            print(f"Epoch {epoch+1}/{n_epochs}, Loss: {avg_loss:.4f}")

    return predictor

def predict_params(predictor: "ParamPredictor",
                  mse: float,
                  n_samples: int = 10,
                  param_names: List[str] = None) -> List[dict]:
    """Predict `n_samples` parameter dictionaries for a given MSE value.

    Fix: the original hard-coded exactly four parameter names, raising
    IndexError when the predictor's `param_dim` was smaller and silently
    dropping dimensions when it was larger. The names are now a keyword
    argument (defaulting to the original four for backward compatibility)
    and a mismatch raises a clear ValueError.

    Args:
        predictor: trained ParamPredictor to query.
        mse: the MSE value to condition on.
        n_samples: number of parameter sets to generate (the same input is
            repeated; outputs differ only if the model is stochastic).
        param_names: names for each predicted dimension, in order.

    Returns:
        A list of `n_samples` dicts mapping each name to a float.

    Raises:
        ValueError: if the predictor's output width != len(param_names).
    """
    if param_names is None:
        param_names = ["p_crossover", "p_subtree_mutation",
                       "p_hoist_mutation", "p_point_mutation"]

    # Repeat the same MSE value for every requested sample.
    mse_tensor = torch.tensor([[mse]], device=predictor.device).float()
    mse_tensor = mse_tensor.expand(n_samples, 1)

    # Generate the parameter vectors.
    params = predictor.predict(mse_tensor)

    # Fail loudly instead of silently dropping or IndexError-ing.
    if params.shape[1] != len(param_names):
        raise ValueError(
            f"predictor produced {params.shape[1]} parameters but "
            f"{len(param_names)} names were given"
        )

    # Convert each row into a name -> float dictionary.
    return [
        {name: value.item() for name, value in zip(param_names, param_set)}
        for param_set in params
    ]