import os.path

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from src.EmotionAnalyser.early_stopping import EarlyStopping
from src.EnvironmentVariables import MODELS_PATH, DATA_PATH
from src.Utils.MyUtil import stock_adjust
from src.TqSdk.count_feature import count_feature
from tqdm import tqdm
import pickle
from src.EnvironmentVariables import BASE_PATH

# Fail fast: the dataset and model below move everything to the GPU unconditionally.
assert torch.cuda.is_available()
# Training hyper-parameters shared by montage_data / creat_dataloader / train_lstm.
hyper_parameters = {
    'seq_length': 25,       # feature window length (each sample also carries 1 label row)
    'learning_rate': 0.005,
    'batch_size': 32,
    'epoch': 1000
}


def setup_seed(seed):
    """Seed torch (CPU and all CUDA devices) and numpy for reproducible runs."""
    for seeder in (torch.manual_seed, torch.cuda.manual_seed_all, np.random.seed):
        seeder(seed)
    # Deliberately not seeded (kept from the original):
    # random.seed(seed)
    # torch.backends.cudnn.deterministic = True


# Preprocess raw data and assemble windows of seq_length rows each.
def montage_data(df1,
                 seq_length=hyper_parameters['seq_length'],
                 isIndex=False,
                 **kwargs):
    """Turn one stock's OHLCV frame into (X, Y) training windows.

    Each window is seq_length + 1 rows: the first seq_length rows become the
    features (per-window MinMaxScaler fit on history only, then applied to
    the final row so no look-ahead leaks in), and the final row supplies the
    labels ['turnover', 'change'].

    Args:
        df1: price DataFrame. NOTE(review): the 'diff' normalisation loop
            uses .loc with integer labels, so df1 is assumed to carry a
            0..n-1 RangeIndex — confirm with callers.
        seq_length: feature window length.
        isIndex: True for index series (skips the stock_adjust step).
        **kwargs: unused; accepted for call-site compatibility.

    Returns:
        Tuple (X, Y) of numpy arrays: X has one (seq_length, 8) feature
        matrix per window, Y the matching [turnover, change] label pair.
    """
    label_name = ['turnover', 'change']
    train_col = ['open', 'close', 'high', 'low', 'volume', 'turnover', 'rsi', 'diff']
    if not isIndex:
        df1 = stock_adjust(df1)
    target_feature = {
        "MACD": [12, 26, 9],
        "RSI": [30]
    }
    df1 = count_feature(df1, target_feature)
    # Normalise the MACD 'diff' by the std-dev of the trailing 63 closes.
    for i in range(63, len(df1) - 64):
        close = df1.iloc[i - 63:i, df1.columns.get_loc("close")]
        std = np.std(np.array(close))
        df1.loc[i, 'diff'] = df1.loc[i, 'diff'] / std

    about_price_col = ['open', 'close', 'high', 'low']
    need_scaler_col = ['volume', 'turnover', 'rsi']
    X, Y = [], []
    for i in range(len(df1) - seq_length - 2):
        seq = df1[i:i + seq_length + 1]
        # Fit the price scaler on the first seq_length rows only, then apply
        # it to the final (label) row.
        about_price = seq.iloc[:-1].loc[:, about_price_col].values
        scaler = MinMaxScaler()
        fit_ans = scaler.fit_transform(about_price)
        fit_ans = np.vstack([fit_ans,
                             scaler.transform(
                                 seq.iloc[-1].loc[about_price_col].values[np.newaxis, :])])

        # Work on a copy so df1 is never mutated through the window view.
        data = seq.copy()
        data[about_price_col] = fit_ans

        # Scale volume/turnover/rsi the same way: fit on history, transform last row.
        for col_name in need_scaler_col:
            scaler = MinMaxScaler()
            col_pos = data.columns.get_loc(col_name)
            # BUG FIX: the original used data.loc[:len(data) - 1, col_name],
            # which is *label*-based. The window keeps df1's original index,
            # so for windows starting past label len(data) - 1 this selected
            # the wrong (or an empty) slice and MinMaxScaler raised. Use
            # positional indexing instead.
            history = data.iloc[:-1, col_pos].values.reshape(-1, 1)
            data.iloc[:-1, col_pos] = scaler.fit_transform(history).ravel()
            last_value = scaler.transform(
                np.array(data.iloc[-1, col_pos]).reshape(1, -1))[0, 0]
            data.iloc[-1, col_pos] = last_value

        seq = data

        # Drop windows touched by NaNs (indicator warm-up rows, gaps in data).
        if seq.isna().any().any():
            continue
        X.append(seq.iloc[:-1].loc[:, train_col])
        Y.append(seq.loc[:, label_name].iloc[-1])

    return np.array(X), np.array(Y)


# Dataset wrapper: serves sliding windows over pre-built arrays.
class TimeSeriesDataset(Dataset):
    """Sliding-window dataset: sample idx is (X[idx:idx+window], y[idx+window]).

    Both arrays are converted to float tensors and moved to the GPU once, at
    construction time.
    """

    def __init__(self, X: np.ndarray, y: np.ndarray, window=60):
        self.X = torch.FloatTensor(X).cuda()
        self.y = torch.FloatTensor(y).cuda()
        self.window = window

    def __len__(self):
        # Only starts that leave a full window plus one label row are valid.
        return len(self.X) - self.window

    def __getitem__(self, idx):
        end = idx + self.window
        features = self.X[idx:end]
        target = self.y[end]
        return features, target


# NOTE(review): unfinished stub — it builds nothing and implicitly returns
# None. The commented TODO below sketches the intended implementation.
def creat_dataloader(datasets:dict, shuffle=False):
    """Intended to build a DataLoader from {name: [df, label]} datasets.

    Not implemented yet: only declares the feature column list, then falls
    through and returns None.
    """
    train_dataset=[]
    test_dataset = []
    # Feature columns the eventual loader is expected to use.
    train_col = [
        'open', 'high', 'low', 'close',
        'change', "bbi_signal", "bias_signal", "cci_signal",
        "dma_signal", "expma_signal", "kdj_signal", "macd_signal",
        "mfi_signal", "mi_signal", "mtm_signal", "priceosc_signal",
        "psy_signal", "roc_signal"
    ]
    #TODO
    #for df,label in datasets.values():
    #    df:pd.DataFrame
    #    train_data =
    #return DataLoader(dataset, batch_size=hyper_parameters['batch_size'], shuffle=shuffle)


class LSTM4Market(nn.Module):
    """Stacked two-layer LSTM with a shared trunk and two heads.

    - fc_reg: regresses output_size targets (by default the two label
      columns used elsewhere in this file).
    - fc_sign: single-unit probability head for direction.
    - fc_criterion: Huber loss attached here so the training loop can reuse it.
    """

    def __init__(self, input_size=8, output_size=2, dropout=0.2, **kwargs):
        super(LSTM4Market, self).__init__()
        self.lstm0 = nn.LSTM(input_size=input_size, hidden_size=64, num_layers=1, batch_first=True)
        self.lstm1 = nn.LSTM(input_size=64, hidden_size=32, num_layers=1, batch_first=True)
        # Plain tuple (not ModuleList): lstm0/lstm1 are already registered as
        # attributes, so their parameters are tracked; the tuple is shorthand.
        self.lstm = (self.lstm0, self.lstm1)
        self.fc = nn.Sequential(
            nn.Linear(32, 64),
            nn.LeakyReLU(),
            nn.Dropout(dropout)
        )
        self.fc_criterion = nn.HuberLoss()
        self.fc_reg = nn.Sequential(
            nn.Linear(64, output_size)
        )
        # BUG FIX: the original applied nn.Softmax() to a single logit, which
        # is constant 1.0 for every input (softmax over a size-1 dimension).
        # Sigmoid produces the intended probability in (0, 1).
        self.fc_sign = nn.Sequential(
            nn.Linear(64, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        """x: (batch, seq_len, input_size) -> (pred (batch, output_size), sign (batch, 1))."""
        output, _ = self.lstm[0](x)
        output, _ = self.lstm[1](output)
        # Only the final time step's hidden state feeds the heads.
        x = self.fc(output[:, -1, :])

        pred = self.fc_reg(x)
        sign = self.fc_sign(x)
        return pred, sign


def load_data(dir_path, target_stock=None):
    """Read sh<code>.csv files, window them via montage_data, cache the
    arrays as pickles, and split each stock into train / last-600-row test.

    Returns (train_x, train_y, test_x, test_y) as concatenated numpy arrays.
    """
    if target_stock is None:
        target_stock = [600519]
    usecols = ["date",
               "open",
               "high",
               "low",
               "close",
               "volume",
               "turnover",
               "change",
               "adjust_price"]
    files_path = [os.path.join(dir_path, f"sh{code}.csv") for code in target_stock]

    dfs = []
    for path in files_path:
        try:
            frame = pd.read_csv(path, usecols=usecols, parse_dates=['date']).sort_values(by='date').dropna()
            if len(frame) < 1000:
                # Too short a listed history to be useful.
                print(f"sh{path} 在市时间较短")
            else:
                dfs.append(frame)
        except Exception as e:
            print(f"read file {path} fail with error {e}")

    train_x, train_y, test_x, test_y = [], [], [], []
    for i, df in tqdm(enumerate(dfs)):
        feat, lab = montage_data(df)
        # Cache the standardised windows so later runs can skip preprocessing.
        with open(os.path.join(DATA_PATH, 'standardData', f'data{i}.pickle'), 'wb') as f:
            pickle.dump({'X': feat, 'y': lab}, f)
        assert len(feat) > 600
        train_x.append(feat[:-600])
        train_y.append(lab[:-600])
        test_x.append(feat[-600:])
        test_y.append(lab[-600:])
    train_x, train_y, test_x, test_y = (np.concatenate(part, axis=0)
                                        for part in (train_x, train_y, test_x, test_y))
    return train_x, train_y, test_x, test_y


def train_lstm(train_loader: DataLoader, test_loader: DataLoader):
    """Train LSTM4Market on train_loader with early stopping on test loss.

    Saves the model's state dict under MODELS_PATH/LSTM_model/LSTMmodel.pt
    and plots the train/test loss curves.
    """
    model = LSTM4Market().cuda()
    optimizer = optim.AdamW(model.parameters(), lr=hyper_parameters['learning_rate'])

    num_epochs = hyper_parameters['epoch']

    early_stop = EarlyStopping(patience=50)
    total_y_losses = [[], []]
    # NOTE(review): the sign head is never trained here, so these stay empty
    # and the final 'Sign loss' figure renders blank — kept for parity.
    total_sign_losses = [[], []]
    for epoch in range(num_epochs):
        model.train()
        y_losses = []
        pbar = tqdm(desc=f"epoch:{epoch}/{num_epochs}", total=len(train_loader))
        for inputs, labels in train_loader:
            # BUG FIX: forward() returns (pred, sign); the original passed the
            # whole tuple into the Huber loss, which raises at runtime.
            pred, _sign = model(inputs)

            loss = model.fc_criterion(pred, labels)
            y_losses.append(loss.item())

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            pbar.set_postfix_str(f"total loss:{np.mean(y_losses):.4f}; batch loss:{loss.item():.4f}")
            pbar.update(1)
        pbar.close()

        model.eval()
        test_losses = []
        with torch.no_grad():
            for inputs, labels in test_loader:
                pred, _sign = model(inputs)
                loss = model.fc_criterion(pred, labels)
                test_losses.append(loss.item())

        total_y_losses[0].append(np.mean(y_losses))
        total_y_losses[1].append(np.mean(test_losses))

        print(
            f'Epoch [{epoch + 1}/{num_epochs}], Loss: {np.mean(y_losses):.4f}, Test Loss: {np.mean(test_losses):.4f} ')
        early_stop(np.mean(test_losses), model)
        if early_stop.early_stop:
            # Restore the best weights seen before patience ran out.
            model.load_state_dict(early_stop.load_checkpoint())
            break
    # BUG FIX: torch.save(model.parameters(), ...) serializes a generator
    # object, not the weights. Save the state dict instead.
    torch.save(model.state_dict(), os.path.join(MODELS_PATH, "LSTM_model", "LSTMmodel.pt"))
    show_loss(*total_y_losses, title='Pred Loss')
    show_loss(*total_sign_losses, title='Sign loss')


def show_loss(train, test, title='loss'):
    """Plot train vs test loss curves on a figure named *title*.

    Does not call plt.show(); the caller decides when to display.
    """
    plt.figure(title)
    epochs = list(range(len(train)))
    for series in (train, test):
        plt.plot(epochs, series)
    plt.legend(['train', 'test'])


def main():
    """Build per-stock (df, label) pairs from the preprocessed sz50 signal CSVs.

    For each stock: computes a 5-day moving average and its 5-days-ahead
    value, then labels each row with signal=1 when the future MA exceeds the
    current one. NOTE(review): `datasets` is built but never used/returned —
    presumably meant to feed creat_dataloader once that stub is finished.
    """
    import talib as ta  # third-party; kept as a lazy import, hoisted out of the loop

    dir_path = os.path.join(BASE_PATH, 'data/preProcessedIndexData/sz50stockSignal')
    files_name = os.listdir(dir_path)  # e.g. ['sh600010.csv']
    files_path = [os.path.join(dir_path, f) for f in files_name]
    datasets = dict()
    for i, f in enumerate(files_path):
        df = pd.read_csv(f, parse_dates=['date'])
        df = df.sort_values('date').dropna()
        ma5 = ta.MA(df['close'].values, 5)
        df['ma5'] = ma5
        # BUG FIX: DataFrame.shift is not in-place; the original discarded the
        # shifted frame, so ma5_future was identical to ma5 and every signal
        # below came out 0. Shift the series and assign the result.
        df['ma5_future'] = pd.Series(ma5).shift(-5).values
        if len(df) < 200:
            continue
        # .copy() so the signal assignment below cannot trigger
        # SettingWithCopyWarning / silently write into a view of df.
        label = df[['turnover', 'change']].copy()
        label.loc[:, 'signal'] = np.where(df['ma5_future'] > df['ma5'], 1, 0)
        datasets[files_name[i].split(".")[0]] = [df, label]





if __name__ == '__main__':
    # Seeding is currently disabled; each run uses a fresh random seed.
    # setup_seed(610749002278200)
    # np.random.seed(3407)
    main()
    # torch.seed() re-seeds the RNG with a fresh random seed and returns it;
    # printing it lets a run be reproduced later via setup_seed.
    print(f'torch seed: {torch.seed()}')
    plt.show()
