import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from gan import Generator, Discriminator

import numpy as np
from torch.utils.data import Dataset, DataLoader

import os
from tqdm import tqdm
import pickle
from src.EnvironmentVariables import MODELS_PATH, DATA_PATH
from src.Utils.MyUtil import seq_normalization

# Shared hyper-parameters, consumed via **HYPER_PARAMETERS by the Generator /
# Discriminator constructors below and by load_stock_data / sample_Z.
HYPER_PARAMETERS = {
    "generator_input_size": 40,  # width of the per-day noise vector fed to the generator
    "num_historical_days": 40,   # sequence length: window of trading days per sample
    "num_feature": 9,            # features per day (OHLCV + PE/PS/PC/PB, see montage_data)
    "dropout": 0.0,
    "LSTM_features": 48,
    "LSTM_hidden_size": 64,
    "discriminator_features": 24,
    "LSTM_num_layers": 3,
    "batch_size": 32
}


class GANTrainer:
    """Vanilla GAN trainer (BCE loss) held entirely in class-level state.

    The trainer is used as a singleton via classmethods; ``save_checkpoint`` /
    ``load_checkpoint`` below persist these class attributes by name.
    """

    generator = Generator(**HYPER_PARAMETERS).cuda()
    discriminator = Discriminator(**HYPER_PARAMETERS).cuda()

    # Loss function and optimizers.
    criterion = nn.BCELoss().cuda()
    d_optimizer = optim.Adam(discriminator.parameters(), lr=0.0001)
    g_optimizer = optim.Adam(generator.parameters(), lr=0.0001)

    # Running loss histories; periodically trimmed to bound memory.
    DLosses = []
    GLosses = []
    Losses_times = 0

    # When > 0, the discriminator update is skipped for this many calls
    # so the generator can catch up.
    wait = 0

    @classmethod
    def train(cls, X, Z, **kwargs):
        """Run one combined discriminator/generator training step.

        :param X: batch of real sequences (moved to GPU here).
        :param Z: batch of generator noise inputs (moved to GPU here).
        :param kwargs: optional ``pbar`` (tqdm bar) whose ';'-separated
            postfix fields are updated in place.
        :return: (mean D loss, mean G loss); the G loss is -1 during the
            first 1000 calls while the discriminator warms up.
        """
        # Drop the oldest 400 entries so the running means follow recent
        # behaviour and the lists stay bounded.
        if len(cls.DLosses) % 1000 == 800:
            cls.DLosses = cls.DLosses[400:]
        if len(cls.GLosses) % 1000 == 800:
            cls.GLosses = cls.GLosses[400:]
        cls.Losses_times += 1

        X = X.cuda()
        Z = Z.cuda()
        real_data = X
        fake_data = seq_normalization(cls.generator(Z))

        real_labels = torch.ones(real_data.size(0), 1).cuda()
        fake_labels = torch.zeros(real_data.size(0), 1).cuda()

        if cls.wait == 0:
            # --- Discriminator step ---
            cls.d_optimizer.zero_grad()
            real_outputs = cls.discriminator(real_data)
            real_loss = cls.criterion(real_outputs, real_labels)

            fake_outputs = cls.discriminator(fake_data.detach())
            fake_loss = cls.criterion(fake_outputs, fake_labels)

            # Mean L2 penalty on the discriminator weights.  torch.stack
            # keeps the norms attached to the autograd graph; the previous
            # torch.tensor([...]) construction created a detached leaf, so
            # the penalty never produced gradients.
            params = list(cls.discriminator.parameters())
            weight_penalty = torch.stack([torch.norm(p) for p in params]).sum() / len(params)
            d_loss = real_loss + fake_loss + 0.0001 * weight_penalty
            d_loss.backward()
            cls.d_optimizer.step()
            cls.DLosses.append(d_loss.item())
            # If D becomes too strong, pause it for a few steps.
            if np.mean(cls.DLosses) < 0.2:
                cls.wait = 10
            if 'pbar' in kwargs.keys():
                pbar = kwargs['pbar']
                tmp = pbar.postfix.split(';')
                # Need at least two fields before writing tmp[1]; the old
                # ``len(tmp) > 0`` guard could IndexError on a single-field
                # postfix.
                if len(tmp) > 1:
                    tmp[1] = f"real_loss={real_loss.item()},fake_loss={fake_loss.item()}"
                pbar.set_postfix_str(";".join(tmp))
        else:
            if 'pbar' in kwargs.keys():
                pbar = kwargs['pbar']
                tmp = pbar.postfix.split(';')
                if len(tmp) > 0:
                    tmp[-1] = f"wait={cls.wait}"
                pbar.set_postfix_str(";".join(tmp))

            cls.d_optimizer.zero_grad()
            cls.wait -= 1
        # Warm-up: skip generator updates for the first 1000 calls.
        if cls.Losses_times < 1000:
            return np.mean(cls.DLosses), -1

        # --- Generator step ---
        cls.g_optimizer.zero_grad()
        fake_outputs = cls.discriminator(fake_data)
        g_loss = cls.criterion(fake_outputs, real_labels)
        g_loss.backward()
        cls.g_optimizer.step()

        cls.GLosses.append(g_loss.item())

        # If G falls too far behind, pause D for a long stretch.
        if np.mean(cls.GLosses) >= 2.5:
            cls.wait = 50
        return np.mean(cls.DLosses), np.mean(cls.GLosses)


class WGANTrainer:
    """WGAN trainer (weight clipping), held entirely in class-level state."""

    generator = Generator(**HYPER_PARAMETERS).cuda()
    discriminator = Discriminator(isWGAN=True, **HYPER_PARAMETERS).cuda()

    # WGAN uses RMSprop and no BCE criterion.
    d_optimizer = optim.RMSprop(discriminator.parameters(), lr=0.00008)
    g_optimizer = optim.RMSprop(generator.parameters(), lr=0.0001)

    # Hard clipping bound applied to every critic weight after each step.
    clip_value = 0.01

    # Running loss histories; periodically trimmed to bound memory.
    DLosses = []
    GLosses = []
    Losses_times = 0

    wait = 0

    @classmethod
    def train(cls, X, Z, **kwargs):
        """Run one critic step and one generator step.

        :param X: batch of real sequences (moved to GPU here).
        :param Z: batch of generator noise inputs (moved to GPU here).
        :return: (mean critic loss, mean generator loss) over the histories.
        """
        # Keep the histories bounded by dropping the oldest 400 entries.
        if cls.Losses_times % 1000 == 800:
            cls.DLosses = cls.DLosses[400:]
            cls.GLosses = cls.GLosses[400:]
        cls.Losses_times += 1

        X, Z = X.cuda(), Z.cuda()

        # Critic step: mean(D(fake)) - mean(D(real)); unlike BCE, no log.
        cls.d_optimizer.zero_grad()
        detached_fake = cls.generator(Z).detach()
        critic_loss = torch.mean(cls.discriminator(detached_fake)) - torch.mean(cls.discriminator(X))
        critic_loss.backward()
        cls.d_optimizer.step()
        cls.DLosses.append(critic_loss.item())

        # Clamp every critic weight into [-clip_value, clip_value].
        for weight in cls.discriminator.parameters():
            weight.data.clamp_(-cls.clip_value, cls.clip_value)

        # Generator step: maximize the critic score of generated samples.
        cls.g_optimizer.zero_grad()
        generated = cls.generator(Z)
        gen_loss = -torch.mean(cls.discriminator(generated))
        gen_loss.backward()
        cls.g_optimizer.step()
        cls.GLosses.append(gen_loss.item())

        return np.mean(cls.DLosses), np.mean(cls.GLosses)


def cal_gp(D, real_seq, fake_seq):
    """Compute the WGAN-GP gradient penalty for discriminator ``D``.

    For each real/fake pair a random point on the connecting line is
    sampled, and the squared deviation of the gradient norm from 1 is
    averaged over the batch.

    :param D: discriminator/critic network.
    :param real_seq: batch of real sequences (on GPU).
    :param fake_seq: batch of generated sequences (on GPU).
    :return: scalar penalty term, differentiable w.r.t. D's parameters.
    """
    # Per-sample mixing ratio in [0, 1), broadcast over seq/feature dims.
    ratio = torch.rand(size=(real_seq.shape[0], 1, 1)).cuda()
    # Interpolated samples; gradients w.r.t. them are required below.
    mixed = (ratio * real_seq + (1 - ratio) * fake_seq).requires_grad_(True)

    # NOTE(review): cuDNN is disabled for this forward pass — presumably
    # because double backward through cuDNN RNN kernels is unsupported;
    # confirm against the Generator/Discriminator internals.
    with torch.backends.cudnn.flags(enabled=False):
        scores = D(mixed)
    # Equal weight for every output element in the vector-Jacobian product.
    grad_weights = torch.ones_like(scores).cuda()
    gradients = torch.autograd.grad(
        outputs=scores,
        inputs=mixed,
        grad_outputs=grad_weights,
        create_graph=True,   # keep the graph so the penalty itself has gradients
        retain_graph=True
    )[0]
    # Mean of (||grad||_2 - 1)^2 over the batch.
    return ((gradients.norm(2, dim=1) - 1) ** 2).mean()


class WGANGPTrainer:
    """WGAN-GP trainer: Wasserstein loss plus the ``cal_gp`` gradient penalty."""

    generator = Generator(**HYPER_PARAMETERS).cuda()
    discriminator = Discriminator(isWGAN=True, **HYPER_PARAMETERS).cuda()

    d_optimizer = optim.Adam(discriminator.parameters(), lr=0.0001)
    g_optimizer = optim.Adam(generator.parameters(), lr=0.000055)

    # Gradient-penalty coefficient.
    a = 10

    # Running loss histories; periodically trimmed to bound memory.
    DLosses = []
    GLosses = []
    Losses_times = 0

    @classmethod
    def train(cls, X, Z, **kwargs):
        """Run one penalized critic step and one generator step.

        :param X: batch of real sequences (moved to GPU here).
        :param Z: batch of generator noise inputs (moved to GPU here).
        :return: (mean critic loss, mean generator loss) over the histories.
        """
        # Keep the histories bounded by dropping the oldest 400 entries.
        if cls.Losses_times % 1000 == 800:
            cls.DLosses = cls.DLosses[400:]
            cls.GLosses = cls.GLosses[400:]
        cls.Losses_times += 1

        X, Z = X.cuda(), Z.cuda()

        # Critic step: Wasserstein estimate plus the gradient penalty.
        cls.d_optimizer.zero_grad()
        fake_seq = cls.generator(Z).detach()
        penalty = cal_gp(cls.discriminator, X, fake_seq)
        critic_loss = (-torch.mean(cls.discriminator(X))
                       + torch.mean(cls.discriminator(fake_seq))
                       + cls.a * penalty)
        critic_loss.backward()
        cls.d_optimizer.step()
        cls.DLosses.append(critic_loss.item())

        # Generator step.
        cls.g_optimizer.zero_grad()
        generated = cls.generator(Z)
        gen_loss = -torch.mean(cls.discriminator(generated))
        gen_loss.backward()
        cls.g_optimizer.step()
        cls.GLosses.append(gen_loss.item())

        return np.mean(cls.DLosses), np.mean(cls.GLosses)


def montage_data(df_list, seq_length):
    """Cut every DataFrame into sliding windows and stack them all.

    Each frame is sorted by date, reduced to the 9 feature columns, and
    split into overlapping windows of ``seq_length`` rows; all windows are
    concatenated along axis 0.

    :param df_list: iterable of per-stock DataFrames with a 'date' column.
    :param seq_length: number of rows per window.
    :return: ndarray of shape (total_windows, seq_length, 9).
    """
    feature_columns = ["open", "high", "low", "close", "volume",
                       "PE_TTM", "PS_TTM", "PC_TTM", "PB"]
    windows_per_stock = []
    for frame in df_list:
        if len(frame) < seq_length:
            continue  # too short to yield a window
        features = frame.sort_values(by='date')[feature_columns]
        # NOTE(review): the range stops one window early (the trailing -1);
        # presumably the final row is reserved for a label — confirm.
        stacked = np.array([features[start:start + seq_length]
                            for start in range(len(features) - seq_length - 1)])
        # A frame with no windows produces shape (0,); keep only real
        # (n, seq_length, 9) stacks.
        if stacked.shape[-1] == 9:
            windows_per_stock.append(stacked)
    return np.concatenate(windows_per_stock, axis=0)


class StockDataset(Dataset):
    """Dataset of normalized stock windows, each paired with a fresh noise draw."""

    def __init__(self, df_list, seq_length=25, **kwargs):
        """Build every window up-front and keep them normalized on the GPU.

        :param df_list: list of per-stock DataFrames (see ``montage_data``).
        :param seq_length: window length in trading days.
        :param kwargs: forwarded to ``sample_Z`` on every item fetch.
        """
        from src.Utils.MyUtil import seq_normalization
        windows = torch.FloatTensor(montage_data(df_list, seq_length)).cuda()
        self.X = seq_normalization(windows)
        self.kwargs = kwargs

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        # Pair the real window with a newly drawn generator input.
        return self.X[idx], sample_Z(**self.kwargs)


def sample_Z(**kwargs):
    """Draw a generator noise input uniformly from [-1, 1).

    If BOTH ``num_historical_days`` and ``generator_input_size`` are present
    in ``kwargs`` they define the shape; otherwise both dimensions fall back
    to ``HYPER_PARAMETERS`` (a single key alone is ignored, matching the
    original all-or-nothing contract).

    :return: FloatTensor of shape (num_historical_days, generator_input_size).
    """
    # ``in kwargs`` replaces the non-idiomatic ``in kwargs.keys()``; the two
    # duplicated tensor-construction branches are collapsed into one.
    if "num_historical_days" in kwargs and "generator_input_size" in kwargs:
        days = kwargs["num_historical_days"]
        width = kwargs["generator_input_size"]
    else:
        days = HYPER_PARAMETERS["num_historical_days"]
        width = HYPER_PARAMETERS["generator_input_size"]
    return torch.FloatTensor(np.random.uniform(-1., 1., size=(days, width)))


# 训练GAN模型
def train_model(trainer, dataloader: DataLoader):
    num_epochs = 1000

    # trainer.generator.load_state_dict(load_checkpoint_attr('generator', 'GANTrainer_checkpoint_0.pt').state_dict(),
    #                                  strict=False)
    # trainer.discriminator.load_state_dict(load_checkpoint_attr('discriminator', 'GAN_checkpoint_12.pt').state_dict(),
    #                                      strict=False)
    for epoch in range(num_epochs):
        pbar = tqdm(total=len(dataloader), desc=f'Epoch {epoch + 1}/{num_epochs}')
        pbar.set_postfix_str(";")
        for i, data in enumerate(dataloader):
            X, Z = data
            d_loss, g_loss = trainer.train(X, Z, pbar=pbar)
            tmp = pbar.postfix.split(';')
            tmp[0] = f"D_loss={d_loss},G_loss:{g_loss}"
            pbar.set_postfix_str(";".join(tmp))  # 显示判别网络D和生成网络G的损失
            pbar.update(1)
        pbar.close()
        d_losses = np.mean(trainer.DLosses)
        g_losses = np.mean(trainer.GLosses)
        if epoch % 2 == 0:
            print('Epoch [{}/{}], d_loss: {:.4f}, g_loss: {:.4f}'.format(epoch, num_epochs, d_losses,
                                                                         g_losses))
        if epoch % 4 == 0:
            save_checkpoint(trainer, f"{trainer.__name__}_checkpoint_{epoch}.pt")


def load_stock_data(dir_path: str, target_stock=None, is_index=False,
                    shuffle=True,
                    **kwargs):
    """Read per-stock CSVs, build a StockDataset, and wrap it in a DataLoader.

    :param dir_path: directory containing the CSV files.
    :param target_stock: iterable of stock ids ("sh{id}.csv" naming); when
        None, a fixed slice of the directory listing (files 1000-1099) is used.
    :param is_index: True for index data — files are named "{id}.csv", no
        ``adjust_price`` column is read and ``stock_adjust`` is skipped.
    :param shuffle: forwarded to the DataLoader.
    :param kwargs: must contain ``num_historical_days``; may contain
        ``batch_size`` (default 32); forwarded to StockDataset / sample_Z.
    :return: DataLoader over (real window, noise) pairs.
    """
    from src.Utils.MyUtil import stock_adjust
    seq_length = kwargs['num_historical_days']
    if target_stock is None:
        files_path = [os.path.join(dir_path, f) for f in os.listdir(dir_path)[1000:1100]]
    else:
        files_path = [os.path.join(dir_path, f"sh{f}.csv") for f in target_stock]
    if is_index:
        # Index files carry no exchange prefix; this overrides the list above.
        files_path = [os.path.join(dir_path, f"{f}.csv") for f in target_stock]
    usecols = ["date",
               "open", "high", "low", "close", "volume",
               "PE_TTM", "PS_TTM", "PC_TTM", "PB"]
    if not is_index:
        # Stocks additionally need the adjustment column and stock_adjust.
        usecols += ["adjust_price"]
        df_list = [stock_adjust(pd.read_csv(csv_path, usecols=usecols).dropna())
                   for csv_path in files_path]
    else:
        df_list = [pd.read_csv(csv_path, usecols=usecols).dropna()
                   for csv_path in files_path]
    dataset = StockDataset(df_list, seq_length, **kwargs)
    batch_size = kwargs['batch_size'] if 'batch_size' in kwargs.keys() else 32
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)


def save_checkpoint(trainer, name: str):
    """Pickle a trainer class's state to MODELS_PATH/GAN_model/<name>.

    Every attribute whose name does not contain "__" is saved by name, and
    the trainer class itself is stored under the "__obj__" key so
    ``load_checkpoint`` can restore onto it.
    """
    state = {}
    for attr in dir(trainer):
        if "__" in attr:
            continue  # skip dunder-style names
        state[attr] = getattr(trainer, attr)
    state["__obj__"] = trainer
    target_path = os.path.join(MODELS_PATH, "GAN_model", name)
    with open(target_path, "wb") as f:
        pickle.dump(state, f)
    print("checkpoint ", name, " has saved")


def load_checkpoint(name: str):
    """Restore a trainer saved by ``save_checkpoint``.

    The pickled dict holds the trainer class under "__obj__"; every other
    entry is written back onto it as an attribute.

    NOTE: unpickling executes arbitrary code — only load trusted files.
    """
    with open(os.path.join(MODELS_PATH, "GAN_model", name), "rb") as f:
        state: dict = pickle.load(f)
    trainer = state["__obj__"]
    for attr, value in state.items():
        if attr != '__obj__':
            setattr(trainer, attr, value)
    return trainer


def load_checkpoint_attr(attr, file_name):
    """Fetch one saved attribute from a checkpoint without restoring it all.

    :param attr: key inside the pickled state dict (e.g. 'generator').
    :param file_name: checkpoint file name under MODELS_PATH/GAN_model.
    """
    checkpoint_path = os.path.join(MODELS_PATH, "GAN_model", file_name)
    with open(checkpoint_path, "rb") as f:
        state = pickle.load(f)
    return state[attr]


def main():
    """Entry point: fix the RNG seeds, load the target stocks, train the GAN."""
    torch.manual_seed(46)
    np.random.seed(46)
    target_stocks = pd.read_csv(os.path.join(DATA_PATH, 'targetStocks.csv'))['id']
    path = "E:/Codes/UndergraduateFinalDesign/all_stock_data/stock data"
    dataloader = load_stock_data(path, target_stocks, **HYPER_PARAMETERS)
    train_model(GANTrainer, dataloader)


if __name__ == "__main__":
    main()
