import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Generator(nn.Module):
    """MLP generator: maps a latent vector to a tensor of ``output_shape``.

    Architecture: three Linear->BatchNorm1d->LeakyReLU(0.2) blocks widening
    256 -> 512 -> 1024, then a Linear projection to the flattened output
    size squashed by Tanh (outputs lie in [-1, 1]).
    """

    def __init__(self, input_dim, output_shape):
        super().__init__()
        self.output_shape = output_shape
        flat_size = int(np.prod(output_shape))

        def _block(n_in, n_out):
            # One hidden block: affine map, batch normalization, leaky ReLU.
            return [nn.Linear(n_in, n_out), nn.BatchNorm1d(n_out), nn.LeakyReLU(0.2)]

        layers = _block(input_dim, 256) + _block(256, 512) + _block(512, 1024)
        layers += [nn.Linear(1024, flat_size), nn.Tanh()]
        # Same layer order/indices as a hand-written Sequential would have.
        self.fc = nn.Sequential(*layers)

    def forward(self, z):
        """Map latent batch ``z`` of shape (B, input_dim) to (B, *output_shape)."""
        flat = self.fc(z)
        return flat.view(-1, *self.output_shape)


class Discriminator(nn.Module):
    """MLP discriminator: flattens its input and scores it as real/fake.

    Architecture: three Linear->LeakyReLU(0.2)->Dropout(0.3) blocks narrowing
    1024 -> 512 -> 256, then a Linear head with Sigmoid producing a
    probability in (0, 1).
    """

    def __init__(self, input_shape):
        super().__init__()
        self.input_shape = input_shape
        flat_size = int(np.prod(input_shape))

        widths = [(flat_size, 1024), (1024, 512), (512, 256)]
        layers = []
        for n_in, n_out in widths:
            layers += [nn.Linear(n_in, n_out), nn.LeakyReLU(0.2), nn.Dropout(0.3)]
        layers += [nn.Linear(256, 1), nn.Sigmoid()]
        # Same layer order/indices as a hand-written Sequential would have.
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Score a batch ``x`` of shape (B, *input_shape); returns (B, 1)."""
        flat = x.view(-1, int(np.prod(self.input_shape)))
        return self.model(flat)


class CSIGAN:
    """Vanilla GAN used to augment CSI samples of a single target class (e.g. fall).

    Wraps a :class:`Generator` / :class:`Discriminator` pair trained with the
    standard non-saturating BCE objective.  Only samples whose label equals
    ``target_class`` participate in training and generation.
    """

    # Size of the generator's input noise vector (shared by train/generate).
    LATENT_DIM = 100

    def __init__(self, config):
        """Build the GAN.

        ``config`` must provide ``feature_type`` ("enhanced" selects 4 input
        channels, anything else 2) and ``device``.
        """
        self.config = config
        input_channels = 4 if config.feature_type == "enhanced" else 2
        # Fixed CSI sample layout: (channels, 3, 30, 200) -- presumably
        # (antennas, subcarriers, time); TODO confirm against the data loader.
        self.input_shape = (input_channels, 3, 30, 200)

        self.generator = Generator(self.LATENT_DIM, self.input_shape).to(config.device)
        self.discriminator = Discriminator(self.input_shape).to(config.device)

        # Hyperparameters follow the common DCGAN recipe (lr=2e-4, beta1=0.5).
        self.optimizer_G = torch.optim.Adam(
            self.generator.parameters(), lr=0.0002, betas=(0.5, 0.999))
        self.optimizer_D = torch.optim.Adam(
            self.discriminator.parameters(), lr=0.0002, betas=(0.5, 0.999))

        self.criterion = nn.BCELoss()

    def generate_samples(self, real_samples, labels, target_class):
        """Generate as many fake samples as there are real ``target_class`` ones.

        Returns a detached tensor of fakes on ``config.device``, or ``None``
        when the batch contains no sample of the target class.
        """
        # Only augment the target class (e.g. "fall").
        mask = (labels == target_class)
        if not mask.any():
            return None

        # NOTE(review): the generator stays in whatever mode it is in (train
        # by default), so BatchNorm uses batch statistics here; consider
        # generator.eval() for pure inference -- confirm intended behavior.
        n_samples = int(mask.sum().item())
        z = torch.randn(n_samples, self.LATENT_DIM, device=self.config.device)
        fake_samples = self.generator(z)
        return fake_samples.detach()

    def train(self, real_samples, labels, target_class):
        """Run one GAN step on the target-class samples of this batch.

        Returns ``(d_loss, g_loss)`` as Python floats, or ``None`` when the
        batch has fewer than two usable target-class samples.
        """
        real_samples = real_samples.to(self.config.device)
        # BUGFIX: labels must be on the same device as real_samples, otherwise
        # the boolean-mask indexing below raises when training on GPU.
        labels = labels.to(self.config.device)
        mask = (labels == target_class)
        if not mask.any():
            return None

        real_samples_class = real_samples[mask]
        batch_size = len(real_samples_class)
        # BatchNorm1d in train mode needs >1 sample per batch; skip the step
        # instead of crashing on a lone target-class sample.
        if batch_size < 2:
            return None

        # Target labels: 1 for real, 0 for fake.
        real_labels = torch.ones(batch_size, 1, device=self.config.device)
        fake_labels = torch.zeros(batch_size, 1, device=self.config.device)

        # ---------------------
        #  Train the discriminator
        # ---------------------
        self.optimizer_D.zero_grad()

        # Loss on real samples.
        outputs_real = self.discriminator(real_samples_class)
        d_loss_real = self.criterion(outputs_real, real_labels)

        # Generate fake samples.
        z = torch.randn(batch_size, self.LATENT_DIM, device=self.config.device)
        fake_samples = self.generator(z)

        # Loss on fakes; detach so D's update does not backprop into G.
        outputs_fake = self.discriminator(fake_samples.detach())
        d_loss_fake = self.criterion(outputs_fake, fake_labels)

        # Total discriminator loss.
        d_loss = d_loss_real + d_loss_fake
        d_loss.backward()
        self.optimizer_D.step()

        # ---------------------
        #  Train the generator
        # ---------------------
        self.optimizer_G.zero_grad()

        # Non-saturating loss: G wants D to classify its fakes as real.
        outputs = self.discriminator(fake_samples)
        g_loss = self.criterion(outputs, real_labels)

        g_loss.backward()
        self.optimizer_G.step()

        return d_loss.item(), g_loss.item()