# Bootstrap: presumably switches the working directory to the project root —
# the relative paths below ('data/...') depend on it. TODO confirm behavior.
from tools.change_cwd_to_main import change_cwd_to_main

change_cwd_to_main()


from tools.visualize_data import visualize_data
from tools.visualize_data import show_data_density_by_kde
from bdtime import show_json, show_ls
from bdtime import tt

import torch.nn as nn
import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# import plotly.express as px
import matplotlib.pyplot as plt
import plotly.express as px

from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_percentage_error


def sigmoid(x):
    """Return the logistic sigmoid of x.

    :param x: scalar or numpy array
    :return: value(s) mapped into (0, 1)
    """
    z = np.exp(-x)
    return 1.0 / (1.0 + z)


def describe_data(data, column=None):
    """Print the IQR-to-full-range ratio of a 1-column dataset and return its describe().

    :param data: array-like of values for a single column
    :param column: column label; defaults to the module-level ``predict_column``
    :return: the ``DataFrame.describe()`` result
    """
    if column is None:
        column = predict_column  # backward-compatible default (module-level global)
    v_df = pd.DataFrame(data, columns=[column])
    describe = v_df.describe()

    # Ratio of the interquartile range (middle 50% of the data) to the full
    # min-max range. Use .iloc[0] — positional `[0]` on a labeled Series is
    # deprecated in modern pandas.
    range_p14_p34 = describe.loc["75%"].iloc[0] - describe.loc["25%"].iloc[0]
    range_min_max = describe.loc["max"].iloc[0] - describe.loc["min"].iloc[0]
    print(f'--- range_p14_p34 / range_min_max: {range_p14_p34 / range_min_max : .4f}')

    return describe


# dtype = {
#     'Price': 'float'           # convert the Price column to float
# }
# Numeric columns arrive as strings with thousands separators (e.g. "1,234.50"),
# so strip the comma and cast to float via converters.
columns_to_convert = ['Price', 'High', 'Low', 'Open']
df = pd.read_csv('data/Gold Futures Historical Data_20130101-20241112.csv',
                 converters={col: lambda x: float(x.replace(',', '')) for col in columns_to_convert},
                 # FIX: 'Change %' removed from parse_dates — percent strings like
                 # "0.16%" cannot parse as dates (pandas left the column unchanged),
                 # and the code below relies on it still being a string column.
                 parse_dates=['Date'])


import seaborn as sns
from tools.outliers_processer import winsorize_outliers_with_mad
from tools.outliers_processer import winsorize_outliers_with_sigma


# winsorize_outliers_with_mad(df, ['Prices', 'High', 'Low', 'Open'], n=3)
# df['Price'].describe()
# _df['Price'].describe()
# df['Price'].describe(include='all')

# summary = df.describe()
# print(summary)


def inverse_transform(predict_scaler, x):
    """Undo a scaler, or a chained list of scalers applied in reverse order.

    :param predict_scaler: a single scaler or a list of scalers (fit order)
    :param x: the scaled data to invert
    :return: the data with all inverse transforms applied
    """
    if not isinstance(predict_scaler, list):
        return predict_scaler.inverse_transform(x)
    for step in reversed(predict_scaler):
        x = step.inverse_transform(x)
    return x


# Keep or drop the 'Change %' column; when kept, convert "x%" strings to fractions.
flag__drop_change = 0
if flag__drop_change:
    df.drop(['Change %'], axis=1, inplace=True)
else:
    df['Change %'] = df['Change %'].str.replace('%', '').astype(float) / 100

# Drop the volume column by default; otherwise strip the 'K' suffix.
# NOTE(review): the 'K' suffix is removed but values are NOT multiplied by
# 1000 — confirm the intended unit if this branch is ever enabled.
flag__drop_vol = 1
if flag__drop_vol:
    df.drop(['Vol.'], axis=1, inplace=True)
else:
    df['Vol.'] = df['Vol.'].str.replace('K', '').astype(float)
    df['Vol.'].fillna(0, inplace=True)


# Chronological order (oldest first) with a clean positional index.
df = df.sort_values(by=['Date'], ascending=True)
df.reset_index(drop=True, inplace=True)

# winsorize_outliers_with_mad(df, ['Change %'], n=3)
# winsorize_outliers_with_sigma(df, ['Change %'], n=3)

# --- 离群值处理
# df = winsorize_outliers_with_mad(df, ['Change %'], n=3)
# _df = df.copy()
#
# df = winsorize_outliers_with_sigma(df, ['Change %'], n=3)
# print(f"df['Change %'].describe(): {df['Change %'].describe()}")
# print(f"_df['Change %'].describe(): {_df['Change %'].describe()}")
#
# df['Change %'] = df['Change %'].round(4)
# df.to_csv('data/gold_price.csv', index=False, encoding='utf-8')

# Keep only rows from 2024 onward.
df = df[df.Date.dt.year >= 2024]
# df = df[df.Date.dt.year >= 2020]
# df = df[np.logical_and(df.Date.dt.month.isin([7, 8, 9, 10, 11]), df.Date.dt.year >= 2024)]
desc = f'--- df.shape: {df.shape}'
print(desc)


# tt.tqdm_sleep(desc)

# df[df.Date.dt.year >= 2023].shape

# df = df[df.Date.dt.year >= 2023]  # data for 2023~2024
# test_size = df[df.Date.dt.year == 2024].shape[0]
# test_size = df[df.Date.dt.month.isin([7, 8, 9, 10, 11])].shape[0]
# Test split: rows in July-November 2024 (the year check is redundant after
# the 2024 cut above, but kept as written).
test_size = df[np.logical_and(df.Date.dt.month.isin([7, 8, 9, 10, 11]), df.Date.dt.year >= 2024)].shape[0]

# test_size = test_size // 2
# test_size = 100


if 1:  # df.drop(['Date'])
    # Encode Date as a min-max-scaled numeric feature, then drop the raw column.
    # FIX: Series.view('int64') is deprecated (removed in pandas 3.0);
    # astype('int64') yields the same nanosecond integers for datetime64[ns].
    timestamps = df['Date'].astype('int64')
    time_scaler = MinMaxScaler()
    time_scaler.fit(timestamps.values.reshape(-1, 1))
    timestamps = time_scaler.transform(timestamps.values.reshape(-1, 1))
    df['timestamps'] = timestamps
    df.drop(['Date'], axis=1, inplace=True)


# scaler = MinMaxScaler()
# df = pd.DataFrame(scaler.fit_transform(df), columns=df.columns)

# Per-column scaling: standardize, then min-max to [0, 1], in that order.
# The fitted scaler chain is recorded in scaler_dc so predictions can be
# inverse-transformed later (see inverse_transform above).
scaler_dc = {'timestamps': time_scaler}
for col in df.columns:
    if col != 'timestamps':
        # scaler_dc[col] = MinMaxScaler()
        # scaler_dc[col] = StandardScaler()
        scaler_ls = [StandardScaler(), MinMaxScaler()]
        scaler_dc[col] = scaler_ls

        for scaler_i in scaler_ls:
            df[col] = scaler_i.fit_transform(df[col].values.reshape(-1, 1))

# (removed: a bare df['Change %'].describe() whose result was discarded — dead work)
# df = df.sort_values(by=['Date'], ascending=False)

# df.head()
# df.duplicated().sum()
# df.isnull().sum().sum()

train_size = df.shape[0] - test_size
print(f'====== df.shape: {df.shape}, test_size: {test_size}, train_size: {train_size}')


# --- train data and scaler

window_size = 14  # lookback window length (rows/trading days)
# window_size = 365 * 2
future_interval_days = 7  # forecast horizon: predict the value n days ahead

# Training split: everything except the last test_size rows.
train_data = df[:-test_size]
print('------ train_data.columns: {}'.format(train_data.columns))

train_data = np.array(train_data)
input_dim = train_data.shape[1]  # number of features per timestep

# predict_column = 'Price'
predict_column = 'Change %'
y_i = df.columns.get_loc(predict_column)  # index of the target column
print(f'-------------------- y_i: {y_i} --- predict column: {predict_column}'
      f', future_interval_days: {future_interval_days}')

change_column_name = "Change %"
change_column_index = df.columns.get_loc(change_column_name)

# factories_column = ['Price', 'Open', 'High', 'Low', 'Volume']
# x_ids = [df.columns.get_loc(c) for c in factories_column]

X_train = []
y_train = []

prices_train = []
price_column_name = 'Price'
price_column_index = df.columns.get_loc(price_column_name)

# Sliding-window samples: X = the previous window_size rows (all features),
# y = the target column at row i. Also record row i's (scaled) price so the
# cumulative Change-% target can be reconstructed later.
# (removed: four bare debug expressions — .shape / element lookups — that were
# evaluated and discarded on every iteration)
for i in range(window_size, len(train_data)):
    X_train.append(train_data[i - window_size:i, :])
    y_train.append(train_data[i, y_i])
    prices_train.append(train_data[i, price_column_index])

# scaler.data_max_
# scaler.data_min_ + scaler.data_range_
# print("=== scaler info:", scaler.data_min_, scaler.data_range_, scaler.data_max_)
# print("=== scaler min-max:\n", df.columns.tolist(), '\n', scaler.data_min_, '\n', scaler.data_max_)


# test_data = df.Price[-test_size - window_size:]
# Include window_size extra leading rows so the first test sample has a full
# lookback window. (removed: a discarded test_data.describe() call)
test_data = df[-test_size - window_size:]
test_data = np.array(test_data)
# test_data = scaler.transform(test_data.values.reshape(-1, 1))

# --- test data: same sliding-window construction as the training split
X_test = []
y_test = []

prices_test = []


for i in range(window_size, len(test_data)):
    X_test.append(test_data[i - window_size:i, :])
    y_test.append(test_data[i, y_i])
    prices_test.append(test_data[i, price_column_index])
    # prices_train.append(train_data[i, price_column_index])


# --- conv to np format
X_train = np.array(X_train)
X_test = np.array(X_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
print(
    f"*** shapes --- X_train: {X_train.shape}, X_test: {X_test.shape}, y_train.shape: {y_train.shape}, y_test.shape: {y_test.shape}")

if future_interval_days > 1:
    # Horizon sanity checks: must be a positive int strictly between 1 and window_size.
    assert future_interval_days > 1, f'*** 预测未来第 {future_interval_days} 天? 必须正整数类型, 别乱填...'
    assert future_interval_days < window_size, f'*** future_interval_days should < window_size!'
    # X_train[:, :-(future_interval_days - 1), :].shape

    # Trim the last (future_interval_days - 1) timesteps from every window so each
    # sample only sees data up to future_interval_days days before its target row.
    X_train = X_train[:, :-(future_interval_days - 1), :]
    X_test = X_test[:, :-(future_interval_days - 1), :]

    if predict_column == change_column_name and future_interval_days != 1:
        """这里处理下`百分比变动`问题, 预测[未来n天的累积变化量], 而不是[未来第n天的变化量]"""
        # (translation of the note above) Rebuild the 'Change %' target as the
        # CUMULATIVE change over the next n days — target-day price vs. the last
        # visible day's price — instead of the single-day change.

        change_scale_size = 1  # scale factor; applied to `Change %` only

        # --- compute the new `Change %` target --- train
        last_prices_train = X_train[:, -1, price_column_index]
        prices_train = np.array(prices_train)

        # Prices were standardized + min-max scaled; invert both before taking ratios.
        price_scaler = scaler_dc.get(price_column_name)
        last_prices_train = inverse_transform(price_scaler, last_prices_train.reshape(-1, 1))
        prices_train = inverse_transform(price_scaler, prices_train.reshape(-1, 1))
        changes_train = (prices_train - last_prices_train) / last_prices_train * change_scale_size
        _changes_train = changes_train.copy()

        # Optional MAD winsorization of target outliers; 0 disables it.
        n_of_outliers = 0

        if n_of_outliers != 0:
            print('------ changes_train --- winsorize_outliers_with_mad')
            changes_train, outlier_caches = winsorize_outliers_with_mad(changes_train, columns=[change_column_name], n=n_of_outliers)
            # describe_data(changes_train * 100)
            print()

        # winsorize_outliers_with_mad(np.array([1, 2, 3]), columns=[change_column_name], n=3, caches=outlier_caches)

        # visualize_data(changes_train, chart_type="scatter", title='visualize_data of changes_train')
        # describe_data(changes_train)
        # show_data_density_by_kde(changes_train, 'changes_train')

        changes_train = np.array(changes_train)
        # --- optionally standardize & normalize the new target (fit on train only)
        change_scaler = [StandardScaler(), MinMaxScaler()]
        # change_scaler = [MinMaxScaler()]
        # change_scaler = []
        for scaler_i in change_scaler:
            changes_train = scaler_i.fit_transform(changes_train.reshape(-1, 1))
        # Register the new scaler chain so predictions can be inverse-transformed.
        scaler_dc[change_column_name] = change_scaler
        y_train = changes_train.reshape(-1)

        # --- compute the new `Change %` target --- test
        last_prices_test = X_test[:, -1, price_column_index]
        prices_test = np.array(prices_test)
        prices_test = inverse_transform(price_scaler, prices_test.reshape(-1, 1))
        last_prices_test = inverse_transform(price_scaler, last_prices_test.reshape(-1, 1))
        changes_test = (prices_test - last_prices_test) / last_prices_test * change_scale_size
        _changes_test = changes_test.copy()
        # changes_test = winsorize_outliers_with_mad(changes_test, 3)

        if n_of_outliers != 0:
            print('------ changes_test --- winsorize_outliers_with_mad')
            changes_test, _ = winsorize_outliers_with_mad(changes_test, columns=[change_column_name], n=n_of_outliers, caches=outlier_caches)
            # describe_data(changes_test * 100)
            print()

        # show_data_density_by_kde(changes_test, 'changes_test')

        # v_df = pd.DataFrame(changes_test, columns=[predict_column])
        # v_df.describe()

        # Apply the train-fitted scalers to the test target (transform only, no refit).
        changes_test = np.array(changes_test)
        for scaler_i in change_scaler:
            changes_test = scaler_i.transform(changes_test.reshape(-1, 1))
        y_test = changes_test.reshape(-1)

        # show_data_density_by_kde(y_train, 'y_train')
        # show_data_density_by_kde(y_test, 'y_test')

        # show_data_density_by_kde(np.hstack((y_test, y_train)), 'hstack')
        # np.hstack((y_train, y_test)).shape

        # v_df = pd.DataFrame(y_test, columns=[predict_column])
        # v_df.describe()

        X_train.shape, X_test.shape, y_train.shape, y_test.shape

        # def get_change(x1, x2):
        #     return (x2 - x1) / x1


# Shuffle the training windows; X and y stay aligned via one shared permutation.
shuffle = True
if shuffle:
    indices = np.arange(X_train.shape[0])
    np.random.shuffle(indices)
    X_train = X_train[indices]
    y_train = y_train[indices]

# Convert everything to float32 torch tensors for training.
# (removed: a trailing no-op `X_train.shape, ...` tuple expression)
X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)
y_train = torch.FloatTensor(y_train)
y_test = torch.FloatTensor(y_test)


# --- model, experiment_name, lr


# region # --- LogisticRegression
def get_logistic_model():
    """Build the single-output logistic-regression model for the current window size."""
    from tools.logistic import MultiLayerLogisticClassification, LogisticClassification, LogisticRegression
    use_bias = True
    return LogisticRegression(in_features=window_size, out_features=1, bias=use_bias)


# endregion


# region # --- MyMoEClassifier
def get_moe_model(top_k=1):
    """Build the mixture-of-experts classifier over the input window.

    :param top_k: number of experts routed per sample
    :return: a MyMoEClassifier instance
    """
    from tools.moe import MyMoEClassifier, Linear, Expert
    input_size = window_size
    output_size = 1
    num_experts = 64

    emb_size = 100
    w_importance = 0.01  # weight of the load-balancing/importance loss
    # w_importance = 0

    # expert_cls = Linear
    expert_cls = Expert
    # FIX: the call previously hard-coded expert_cls=Linear, silently ignoring
    # the `expert_cls` selection above; pass the selected class through.
    model = MyMoEClassifier(input_size=input_size, experts=num_experts, top=top_k, emb_size=emb_size,
                            output_size=output_size, w_importance=w_importance, expert_cls=expert_cls)
    return model


# endregion


# region # --- LstmModel
def get_lstm_model():
    """Build the LSTM regressor used by the 'LstmModel' experiment."""
    from tools.lstm import LstmModel
    # Tuning history: hidden_size in {8, 16, 32, 64}, 1-5 layers, uni-/bidirectional;
    # a small bidirectional net (hidden_size=8, one layer) performed best.
    net = LstmModel(input_size=input_dim, num_classes=1, hidden_size=8,
                    num_layers=1, dropout=0.5, bidirectional=True)
    return net


# endregion


# --- training hyper-parameters
total_epoch = 300
# total_epoch = 100
# log_interval = 0.1
log_interval = 0.05  # log every 5% of total_epoch

lr = 0.01
# Select which model to train.
# experiment_name = 'LogisticRegression'
# experiment_name = 'MyMoEClassifier'
experiment_name = 'LstmModel'

if experiment_name == 'LogisticRegression':
    model = get_logistic_model()
elif experiment_name == 'MyMoEClassifier':
    top_k = 1
    model = get_moe_model(top_k=top_k)
elif experiment_name == 'LstmModel':
    model = get_lstm_model()
else:
    raise ValueError(f'Unknown experiment_name: {experiment_name}')

# --- loss_function and optimizer
from tools.my_loss import MeanSquaredLogScaledErrorLoss

# Regression task → MSE; BCE would apply if this were a classifier.
is_classification = False
criterion = nn.BCELoss() if is_classification else nn.MSELoss()
# criterion = nn.BCELoss() if is_classification else MeanSquaredLogScaledErrorLoss(alpha=1, clamp=1, debug=False)


# NOTE(review): 0 presumably means full-batch training — confirm against
# tools.my_trainer_and_test.train.
batch_size = 0


# criterion = nn.HuberLoss(delta=1.0)  # Huber loss; delta is tunable

# optimizer = torch.optim.SGD(model.parameters(), lr=lr)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)

# X_test.shape
# model(X_test[:3])

# scheduler_step_size = 0.1  # adjust the learning rate every n * total_epoch epochs
# scheduler_step_size = 0.03  # adjust the learning rate every n * total_epoch epochs
scheduler_step_size = log_interval
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=scheduler_step_size, gamma=0.1)
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=10, factor=0.1, verbose=True)  # monitor validation loss, patience 10

scheduler = None  # no LR scheduling by default
# scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50, eta_min=0)  # T_max=50, eta_min=0
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=10, factor=0.1, verbose=True)  # monitor validation loss, patience 10


from tools.my_trainer_and_test import train, test_by_evaluate_metrics

# Evaluate naive baselines on the test split so the trained model has reference scores.
flag__show_baseline = True
if flag__show_baseline:

    # NOTE(review): re-running __init__ on the shared `tt` timer appears to reset
    # it so tt.now() below measures only the baseline evaluation — confirm.
    tt.__init__()

    y_true = y_test

    baseline_dc = {}

    # --- baseline: predict the mean of the training targets for every test row
    y_pred = y_train.mean()
    y_pred = np.full(y_test.shape, y_pred)

    baseline__mean_train_y = test_by_evaluate_metrics(y_pred, y_true)
    baseline_dc['训练数据y的平均'] = baseline__mean_train_y

    # --- baseline: predict yesterday's value (last timestep of each window)
    y_pred = X_test[:, -1, y_i]
    baseline__yesterday = test_by_evaluate_metrics(y_pred, y_true)
    baseline_dc['昨天的y作为预测'] = baseline__yesterday

    # --- baseline: predict the mean of the last k timesteps
    k = 3
    y_pred = X_test[:, -k:, y_i]
    y_pred = torch.mean(y_pred, dim=1)
    baseline__last_k = test_by_evaluate_metrics(y_pred, y_true)
    baseline_dc['过去k[3]天的y平均'] = baseline__last_k

    # --- baseline for `Change %`: predict zero change
    # y_pred = np.zeros(y_test.shape)
    # baseline__zero = test_by_evaluate_metrics(y_pred, y_true)
    # baseline_dc['预测为0'] = baseline__zero

    show_json(baseline_dc)
    print(f'====== test_cost_time: {tt.now()},  baseline_dc')


def run_train():
    """Train the selected model on the prepared tensors, then re-print the
    baseline metrics and split sizes for side-by-side comparison.

    Relies on module-level state: the data tensors, model, optimizer, criterion,
    scheduler, and the baseline_dc computed above.
    """
    print(f'\n=========== on training ==============')

    train((X_train, X_test, y_train, y_test), model, optimizer, criterion,
          total_epoch=total_epoch, log_interval=log_interval,
          experiment_name=experiment_name, save=True, is_classification=is_classification, scheduler=scheduler,
          scheduler_step_size=scheduler_step_size, batch_size=batch_size)

    print(f'\n====== baseline_dc:')
    show_json(baseline_dc)

    print(f'====== df.shape: {df.shape}, test_size: {test_size}, train_size: {train_size}\n')


# Entry point: all data prep above runs at import time; training only runs here.
if __name__ == '__main__':
    run_train()

    # def eval():
    #     model.eval()
    #     for i in range(fut_pred):
    #         seq = torch.FloatTensor(test_inputs[-train_window:])
