import os
import mne
import time
import torch
import dataLoad
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import torch.utils.data.dataset as Dataset

# Select the compute device once at import time: GPU when available,
# otherwise CPU. Models and tensors below are moved onto this device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class Generator(nn.Module):
    """Bi-LSTM generator mapping noise sequences to single-channel signals.

    Takes a ``(batch, T, noise_dim)`` noise tensor and produces a
    ``(batch, T, 1)`` signal whose values lie in (-1, 1) via tanh.
    """

    def __init__(self, noise_dim=50, seq_length=500):
        super(Generator, self).__init__()

        self.seq_length = seq_length
        self.noise_dim = noise_dim

        # Two stacked bidirectional LSTMs; 30 hidden units per direction,
        # so each layer emits 60 features per timestep.
        self.lstm1 = nn.LSTM(input_size=noise_dim, hidden_size=30,
                             batch_first=True, bidirectional=True)
        self.lstm2 = nn.LSTM(input_size=60, hidden_size=30,
                             batch_first=True, bidirectional=True)

        # Regularization between the recurrent stack and the output head.
        self.dropout = nn.Dropout(0.2)

        # Per-timestep projection: 60 Bi-LSTM features -> 1 output value.
        self.dense = nn.Linear(60, 1)

        # Shared tanh used after each stage and on the final output.
        self.activation = nn.Tanh()

    def forward(self, x):
        """Generate a (batch, T, 1) signal from (batch, T, noise_dim) noise."""
        out, _ = self.lstm1(x)
        out, _ = self.lstm2(self.activation(out))
        out = self.dropout(self.activation(out))
        # Squash each timestep's scalar into (-1, 1).
        return self.activation(self.dense(out))


class Discriminator(nn.Module):
    """Bi-LSTM discriminator scoring a sequence as real (~1) or fake (~0).

    Expects input of shape ``(batch, seq_length, 1)`` and returns a
    ``(batch, 1)`` probability from a sigmoid.
    """

    def __init__(self, seq_length=500):
        super(Discriminator, self).__init__()

        self.seq_length = seq_length

        # Two stacked bidirectional LSTMs over the 1-channel input; each
        # layer emits 60 features per timestep (30 per direction).
        self.lstm1 = nn.LSTM(input_size=1, hidden_size=30,
                             batch_first=True, bidirectional=True)
        self.lstm2 = nn.LSTM(input_size=60, hidden_size=30,
                             batch_first=True, bidirectional=True)

        # Regularization before the classification head.
        self.dropout = nn.Dropout(0.2)

        # Collapse (batch, seq_length, 60) -> (batch, seq_length * 60)
        # so a single linear layer can score the whole sequence.
        self.flatten = nn.Flatten()
        self.dense = nn.Linear(60 * seq_length, 1)

        # Final probability; tanh is used between the recurrent layers.
        self.sigmoid = nn.Sigmoid()
        self.tanh = nn.Tanh()

    def forward(self, x):
        """Score a (batch, seq_length, 1) sequence; returns (batch, 1) in [0, 1]."""
        out, _ = self.lstm1(x)
        out, _ = self.lstm2(self.tanh(out))
        out = self.dropout(self.tanh(out))
        flat = self.flatten(out)
        return self.sigmoid(self.dense(flat))


# Train a GAN that generates single-channel (1-D) sequences.
def model_train(dataloader, device, epochs=150, model_save=False,
                             model_save_path="./model_param/MIEEGModel"):
    """Adversarially train a Generator/Discriminator pair on real sequences.

    Args:
        dataloader: yields batches whose first element is a tensor of real
            sequences; moved to ``device`` as-is (shape assumed to match the
            discriminator input, (batch, seq_length, 1) — confirm at call site).
        device: torch.device to run training on.
        epochs: number of passes over ``dataloader``.
        model_save: when True, save the generator's state_dict at the end.
        model_save_path: directory for the saved weights; created if missing.

    Returns:
        (generator, discriminator): the trained models.
    """
    generator = Generator().to(device)
    discriminator = Discriminator().to(device)
    # G is optimized to make D label fakes as real; D is optimized to
    # separate real samples from generated ones.
    optimizer_G = optim.Adam(generator.parameters(), lr=0.0001)
    optimizer_D = optim.Adam(discriminator.parameters(), lr=0.0001)
    # Binary cross-entropy over D's sigmoid output.
    criterion = nn.BCELoss()

    for epoch in range(epochs):
        for i, data in enumerate(dataloader):
            real_data = data[0].to(device)
            bs = real_data.size(0)
            # Shape the noise from the generator's own configuration instead
            # of hard-coding (500, 50), so non-default Generator sizes work.
            noise = torch.randn(bs, generator.seq_length, generator.noise_dim,
                                device=device)

            # --- Generator step: push D's score on fakes toward 1. ---
            # [bs, seq_length, noise_dim] -> [bs, seq_length, 1]
            fake_data = generator(noise)

            optimizer_G.zero_grad()
            g_loss = criterion(discriminator(fake_data),
                               torch.ones(bs, 1, device=device))
            g_loss.backward()
            optimizer_G.step()

            # --- Discriminator step: real -> 1, detached fake -> 0. ---
            optimizer_D.zero_grad()
            real_loss = criterion(discriminator(real_data),
                                  torch.ones(bs, 1, device=device))
            fake_loss = criterion(discriminator(fake_data.detach()),
                                  torch.zeros(bs, 1, device=device))
            d_loss = real_loss + fake_loss

            # When real_loss and fake_loss drop together and level off at
            # similar values, D has stabilized.
            d_loss.backward()
            optimizer_D.step()

            if i % 200 == 0:
                print(f"Epoch {epoch}, Iteration {i}, D Loss: {d_loss.item()}, G Loss: {g_loss.item()}")

    if model_save:
        # Ensure the target directory exists (previously a missing directory
        # made torch.save fail), then save under a timestamped filename so
        # successive runs never overwrite each other.
        os.makedirs(model_save_path, exist_ok=True)
        save_file = os.path.join(model_save_path,
                                 "generator_" + str(int(time.time())) + ".pt")
        torch.save(generator.state_dict(), save_file)

    return generator, discriminator