import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import torch.nn.functional as F

class ConditionalDiffusionModel(nn.Module):
    """Conditional diffusion model with classifier-free guidance (CFG).

    A small MLP predicts the noise added to ``x`` at timestep ``t`` given a
    conditioning vector. During training the condition is randomly zeroed
    (``cfg_dropout_prob``) so the same network learns both conditional and
    unconditional predictions, enabling CFG at sampling time.

    Uses the standard DDPM linear-beta noise schedule for both the forward
    (training) and reverse (sampling) processes.
    """

    def __init__(self, input_dim, condition_dim, hidden_dim=128, num_timesteps=1000, cfg_dropout_prob=0.1):
        super().__init__()
        self.input_dim = input_dim
        self.condition_dim = condition_dim
        self.hidden_dim = hidden_dim
        self.num_timesteps = num_timesteps
        self.cfg_dropout_prob = cfg_dropout_prob

        # Noise predictor network: consumes [x_t, condition, t-embedding].
        self.noise_predictor = nn.Sequential(
            nn.Linear(input_dim + condition_dim + hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_dim),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_dim),
            nn.Linear(hidden_dim, input_dim)
        )

        # Learned embedding per discrete timestep.
        self.timestep_embedding = nn.Embedding(num_timesteps, hidden_dim)

        # DDPM linear beta schedule. Registered as buffers so the tensors
        # follow the module across .to(device) and are checkpointed.
        betas = torch.linspace(1e-4, 0.02, num_timesteps)
        alphas = 1.0 - betas
        self.register_buffer("betas", betas)
        self.register_buffer("alphas", alphas)
        self.register_buffer("alphas_cumprod", torch.cumprod(alphas, dim=0))

    def forward(self, x, condition, t, use_cfg=True):
        """Predict the noise component of ``x`` at timestep ``t``.

        Args:
            x: noisy input, shape (batch, input_dim).
            condition: conditioning vector, shape (batch, condition_dim).
            t: integer timesteps, shape (batch,).
            use_cfg: if True and in training mode, randomly zero the
                condition with probability ``cfg_dropout_prob`` so the model
                also learns the unconditional distribution.

        Returns:
            Predicted noise, shape (batch, input_dim).
        """
        t_embed = self.timestep_embedding(t)  # (batch, hidden_dim)

        if use_cfg and self.training:
            # CFG training: drop the condition for a random subset of the
            # batch by zeroing it. Mask is built on the condition's device
            # to avoid a host->device copy per step.
            keep = torch.rand(condition.size(0), 1, device=condition.device) > self.cfg_dropout_prob
            condition = condition * keep

        net_input = torch.cat([x, condition, t_embed], dim=-1)
        return self.noise_predictor(net_input)

    def loss_fn(self, x, condition, t, noise):
        """Denoising loss: form x_t via the forward process, regress on noise.

        FIX: the original passed the *clean* ``x`` to the network, skipping
        the forward diffusion step entirely — the model was asked to predict
        noise it had never seen, so it could not learn to denoise.
        """
        # q(x_t | x_0) = sqrt(a-bar_t) * x_0 + sqrt(1 - a-bar_t) * eps
        alpha_bar = self.alphas_cumprod[t].unsqueeze(-1)  # (batch, 1)
        x_t = alpha_bar.sqrt() * x + (1.0 - alpha_bar).sqrt() * noise
        predicted_noise = self.forward(x_t, condition, t)
        return F.mse_loss(predicted_noise, noise)

    @torch.no_grad()
    def sample(self, condition, num_samples, cfg_scale=7.5):
        """Ancestral DDPM sampling with classifier-free guidance.

        Args:
            condition: conditioning vectors, shape (num_samples, condition_dim).
            num_samples: number of samples to draw.
            cfg_scale: guidance weight; 1.0 recovers the purely conditional
                prediction, larger values push samples toward the condition.

        Returns:
            Generated samples, shape (num_samples, input_dim).

        NOTE: call ``model.eval()`` first so BatchNorm uses running stats.
        """
        device = condition.device
        x = torch.randn(num_samples, self.input_dim, device=device)
        for t in reversed(range(self.num_timesteps)):
            t_tensor = torch.full((num_samples,), t, dtype=torch.long, device=device)

            # Conditional and unconditional predictions, combined via CFG.
            eps_cond = self.forward(x, condition, t_tensor, use_cfg=False)
            eps_uncond = self.forward(x, torch.zeros_like(condition), t_tensor, use_cfg=False)
            eps = eps_uncond + cfg_scale * (eps_cond - eps_uncond)

            # DDPM reverse step (Ho et al., Algorithm 2). FIX: the original
            # used a constant step 1/T in place of a real noise schedule.
            beta_t = self.betas[t]
            alpha_t = self.alphas[t]
            alpha_bar_t = self.alphas_cumprod[t]
            mean = (x - beta_t / torch.sqrt(1.0 - alpha_bar_t) * eps) / torch.sqrt(alpha_t)
            if t > 0:
                x = mean + torch.sqrt(beta_t) * torch.randn_like(x)
            else:
                x = mean  # no noise added at the final step
        return x

# --- Experiment setup: model, optimizer, and a synthetic dataset ---
input_dim = 64
condition_dim = 32

model = ConditionalDiffusionModel(input_dim, condition_dim)
optimizer = optim.Adam(model.parameters(), lr=1e-3)

# Random tensors stand in for real training data here; each example is a
# (sample, condition) pair served in shuffled mini-batches of 32.
num_samples = 1000
x_data = torch.randn(num_samples, input_dim)
condition_data = torch.randn(num_samples, condition_dim)
dataloader = DataLoader(
    TensorDataset(x_data, condition_data),
    batch_size=32,
    shuffle=True,
)
dataset = dataloader.dataset

# Training loop: for each batch, draw a uniformly random timestep and a
# Gaussian noise target, then minimize the noise-prediction MSE.
num_epochs = 100
for epoch in range(num_epochs):
    epoch_loss = 0.0
    num_batches = 0
    for batch_x, batch_condition in dataloader:
        batch_size = batch_x.size(0)
        # One random timestep per example in the batch.
        t = torch.randint(0, model.num_timesteps, (batch_size,))
        noise = torch.randn_like(batch_x)
        loss = model.loss_fn(batch_x, batch_condition, t, noise)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        num_batches += 1
    # FIX: report the mean loss over the whole epoch. The original printed
    # only the last batch's loss, a noisy and misleading progress signal.
    print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {epoch_loss / num_batches:.4f}")

# Round-trip the weights through disk, then generate samples with CFG.
torch.save(model.state_dict(), "conditional_diffusion_model.pth")
model = ConditionalDiffusionModel(input_dim, condition_dim)
# weights_only=True restricts unpickling to tensors/containers — the safe
# way to load a state dict (and the default from torch 2.6 onward).
model.load_state_dict(torch.load("conditional_diffusion_model.pth", weights_only=True))
model.eval()  # BatchNorm must use running statistics at inference time

# Generate samples with classifier-free guidance.
num_samples = 5
condition = torch.randn(num_samples, condition_dim)
with torch.no_grad():
    generated_samples = model.sample(condition, num_samples, cfg_scale=7.5)
print("Generated samples:")
print(generated_samples)