import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from gan import Generator, FeatureModel
import numpy as np
import matplotlib.pyplot as plt

import os
from src.EnvironmentVariables import DATA_PATH

# Shared hyper-parameters for the generator, discriminator and data loader.
HYPER_PARAMETERS = {
    "generator_input_size": 40,   # width of the generator's latent input
    "num_historical_days": 40,    # look-back window length in trading days
    "num_feature": 9,             # features recorded per trading day
    "dropout": 0.0,
    "LSTM_features": 48,
    "LSTM_hidden_size": 64,
    "LSTM_num_layers": 3,
    "batch_size": 32,
}
# Derived size: the discriminator feature width covers the full
# (generator input x historical window) view.
HYPER_PARAMETERS["discriminator_features"] = (
    HYPER_PARAMETERS["generator_input_size"]
    * HYPER_PARAMETERS["num_historical_days"]
)


class OGANDiscriminator(nn.Module):
    """Discriminator / encoder for the OGAN-QP model.

    Projects each day's feature vector into the LSTM feature space, encodes
    the sequence with a FeatureModel, and maps the encoding to a single
    real-valued critic score.
    """

    def __init__(self, num_feature=5,
                 LSTM_features=64,
                 LSTM_hidden_size=256,
                 LSTM_num_layers=3,
                 dropout=0,
                 discriminator_features=128,
                 num_historical_days=40,
                 generator_input_size=30,
                 **kwargs):
        super().__init__()
        self.num_historical_days = num_historical_days
        self.generator_input_size = generator_input_size
        # The Sequential wrappers are kept (even for single layers) so the
        # state_dict keys remain compatible with existing checkpoints.
        self.input = nn.Sequential(nn.Linear(num_feature, LSTM_features))
        self.feature_model = FeatureModel(
            LSTM_features,
            LSTM_hidden_size,
            LSTM_num_layers,
            discriminator_features,
            dropout,
        )
        self.out = nn.Sequential(nn.Linear(discriminator_features, 1))

    def forward(self, x):
        """Return (critic score, feature encoding) for the input sequence x."""
        projected = self.input(x)
        encoding = self.feature_model(projected)
        score = self.out(encoding)
        return score, encoding


def correlation(_X, _y):
    """Return the mean Pearson correlation between ``_X`` and ``_y``.

    Statistics are taken along the second-to-last axis of ``_X`` (the
    per-column axis for a 2-D tensor), then averaged to a scalar tensor.

    Fixes over the previous version:
    - The denominator is the PRODUCT of the standard deviations (Pearson),
      not their sum; the old form was not a correlation and not bounded
      in [-1, 1].
    - The covariance is normalized by the size along the reduced axis with
      an (n - 1) divisor, matching ``torch.std``'s default unbiased
      estimator; the old code divided by ``_X.shape[0]``, which is wrong
      for inputs with more than two dimensions.
    """
    dim = len(_X.size()) - 2
    n = _X.shape[dim]
    x_centered = _X - torch.mean(_X, dim, True)
    y_centered = _y - torch.mean(_y, dim, True)
    # Unbiased sample covariance along `dim`.
    cov = torch.sum(x_centered * y_centered, dim, True) / (n - 1)
    std_x = torch.std(_X, dim, keepdim=True)
    std_y = torch.std(_y, dim, keepdim=True)
    # Small epsilon guards against division by zero for constant inputs.
    res = cov / (std_x * std_y + 1e-8)
    return torch.mean(res)


class OGANQPTrainer:
    """GAN-QP-style trainer holding the generator/discriminator pair, their
    optimizers and loss histories as class-level state.

    NOTE(review): the models are constructed and moved to the GPU at class
    definition time, so merely importing this module requires a CUDA device.
    """

    generator = Generator(**HYPER_PARAMETERS).cuda()
    discriminator = OGANDiscriminator(**HYPER_PARAMETERS).cuda()

    d_optimizer = optim.RMSprop(discriminator.parameters(), lr=0.0001)
    g_optimizer = optim.RMSprop(generator.parameters(), lr=0.0001)

    # QP penalty weight; 50 * 9 presumably relates to the sample
    # dimensionality (days x features) — TODO confirm against the data loader.
    gp_lambda = 10 / (50 * 9) ** 0.5

    # Rolling loss histories (periodically trimmed) and a global step counter.
    DLosses = []
    GLosses = []
    Losses_times = 0

    wait = 0

    # Defines the training procedure (one optimization step).
    @classmethod
    def train(cls, X, Z, **kwargs):
        """Run one combined generator/discriminator update on real batch X
        with latent input Z; return the running mean (D loss, G loss)."""

        # Keep the loss histories bounded: whenever a history's length hits
        # 1000*k + 800, drop its oldest 400 entries.
        if len(cls.DLosses) % 1000 == 800:
            cls.DLosses = cls.DLosses[400:]
        if len(cls.GLosses) % 1000 == 800:
            cls.GLosses = cls.GLosses[400:]
        cls.Losses_times += 1
        # Warm-up: the discriminator is frozen for the first 2000 steps.
        # NOTE(review): it is only unfrozen at exactly step 20000 — the
        # 2000 / 20000 mismatch looks like a typo; confirm which is intended.
        if cls.Losses_times < 2000:
            for param in cls.discriminator.parameters():
                param.requires_grad = False
        if cls.Losses_times == 20000:
            for param in cls.discriminator.parameters():
                param.requires_grad = True
        X = X.cuda()
        Z = Z.cuda()
        real_data = X
        fake_data = cls.generator(Z)

        cls.d_optimizer.zero_grad()
        cls.g_optimizer.zero_grad()
        # Critic score and feature encoding for the real samples.
        real_y, real_z = cls.discriminator(real_data)

        # Fake samples are scored twice: detached (no gradient into the
        # generator) and attached (gradient flows back through G).
        fake_y_ng, fake_z_ng = cls.discriminator(fake_data.detach())
        fake_y, fake_z = cls.discriminator(fake_data)

        real_y_mean = torch.mean(real_y, 1, keepdim=True)
        fake_y_ng_mean = torch.mean(fake_y_ng, 1, keepdim=True)
        fake_y_mean = torch.mean(fake_y, 1, keepdim=True)

        t1_loss = real_y_mean - fake_y_ng_mean
        # t2_loss = fake_y_mean - fake_y_ng_mean
        # Regularizer: encourage the encoding of fake data to track the
        # latent input Z (reshaped to the encoding's shape).
        z_corr = correlation(torch.reshape(Z, fake_z_ng.shape), fake_z_ng)

        # GAN-QP quadratic-potential penalty term.
        # NOTE(review): torch.norm called with keepdim but no dim may raise
        # on some torch versions — verify against the pinned torch release.
        qp_loss = t1_loss ** 2 / (2 * cls.gp_lambda * torch.norm(real_y - fake_y_ng, 2, keepdim=True))

        d_loss = -(real_y - fake_y_ng - qp_loss)
        d_loss = torch.squeeze(d_loss)
        d_loss = torch.mean(d_loss)
        d_loss = d_loss + z_corr
        # d_loss.backward()
        g_loss = real_y_mean - fake_y_mean
        g_loss = torch.mean(torch.squeeze(g_loss))
        # Single backward pass through the combined loss; both optimizers
        # then step on their shares of the gradients.
        total_loss = d_loss + g_loss
        total_loss.backward()
        cls.g_optimizer.step()
        cls.d_optimizer.step()
        cls.DLosses.append(d_loss.item())
        cls.GLosses.append(g_loss.item())

        return np.mean(cls.DLosses), np.mean(cls.GLosses)


def main():
    """Train the OGAN-QP model on the configured list of target stocks."""
    from train_gan import load_stock_data, train_model

    # Fixed seeds for reproducible runs.
    torch.manual_seed(46)
    np.random.seed(46)

    stock_ids = pd.read_csv(os.path.join(DATA_PATH, 'targetStocks.csv'))['id']
    # NOTE: hard-coded local data directory.
    stock_path = "E:/Codes/UndergraduateFinalDesign/all_stock_data/stock data"
    data_loader = load_stock_data(stock_path, stock_ids, **HYPER_PARAMETERS)
    train_model(OGANQPTrainer, data_loader)


def check_gen():
    """Reconstruct one sample through the trained encoder/decoder pair and
    plot the rebuilt series against the ground truth, per feature column."""
    from train_gan import load_stock_data, load_checkpoint

    stock_ids = [600519]
    stock_path = "E:/Codes/UndergraduateFinalDesign/all_stock_data/stock data"
    sample, _ = load_stock_data(stock_path, stock_ids, **HYPER_PARAMETERS).dataset[1]

    trainer: OGANQPTrainer = load_checkpoint('OGANQPTrainer_checkpoint_28.pt')
    with torch.no_grad():
        encoder = trainer.discriminator
        decoder = trainer.generator
        encoder.eval()
        decoder.eval()
        _, latent = encoder(sample)
        latent = torch.reshape(latent, [40, -1])
        from src.Utils.MyUtil import seq_normalization
        latent = seq_normalization(latent)
        rebuilt = seq_normalization(decoder(latent))
        mse = nn.MSELoss()(torch.flatten(rebuilt), torch.flatten(sample)).item()
        print(f"loss {mse}")
        rebuilt = rebuilt.cpu().numpy()
    sample = sample.cpu().numpy()
    for col in range(sample.shape[1]):
        plt.figure(f"ele {col}")
        plt.plot(rebuilt[:, col])
        plt.plot(sample[:, col])
        plt.legend(["rebuild", "true"])
    plt.show()


# Script entry point: run training (call check_gen() to inspect a checkpoint).
if __name__ == "__main__":
    main()
