# -*- coding: utf-8 -*-
import os
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt
from .feature_base import feature_generator

# 定义 LSTM 模型
class LSTMModel(nn.Module):
    """A (possibly stacked) LSTM followed by a linear projection head.

    Maps a ``(batch, seq_len, input_size)`` tensor to a
    ``(batch, output_size)`` prediction computed from the final time step.
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Run the LSTM over ``x`` and project the last step's output."""
        # Fresh zero hidden/cell states, allocated on the input's device.
        state_shape = (self.num_layers, x.size(0), self.hidden_size)
        h0 = torch.zeros(state_shape, device=x.device)
        c0 = torch.zeros(state_shape, device=x.device)
        sequence_out, _ = self.lstm(x, (h0, c0))
        # Keep only the representation of the final time step.
        last_step = sequence_out[:, -1, :]
        return self.fc(last_step)


class LSTMTrainer():
    """Train and evaluate an :class:`LSTMModel` on a price DataFrame.

    ``data`` is assumed to be a pandas DataFrame containing at least the
    columns "成交价" (deal price), "BS", "filetime" and "code"
    — TODO confirm against the caller that supplies it.
    """

    def __init__(self, data, input_size=1, hidden_size=50, num_layers=1, output_size=1):
        print("已初始化LSTM 模型训练")
        self.data = data
        print("已完成数据加载：", self.data.shape)
        # criterion/optimizer are created lazily in model_train().
        self.criterion = None
        self.optimizer = None
        self.num_epochs = 300        # training epochs (previously ignored: a hard-coded 5000 shadowed it)
        self.learning_rate = 0.05    # Adam learning rate
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.model = LSTMModel(self.input_size, self.hidden_size, self.num_layers, self.output_size)

    def train(self):
        """Full pipeline: clean -> feature engineering -> scale -> split -> fit -> evaluate."""
        # Data cleaning
        clean_data = self.clean()
        # Feature engineering (project-local helper)
        feature_generate = feature_generator(clean_data)
        feature_df, label_df = feature_generate.feature_engineer()

        print("特征工程完成，当前数据")
        print(feature_df)
        scaler_feature = StandardScaler()
        scaler_label = StandardScaler()
        # Standardize features and labels independently.
        feature_df_standard = scaler_feature.fit_transform(feature_df)
        label_df_standard = scaler_label.fit_transform(label_df)
        print(feature_df_standard.shape, label_df_standard.shape)
        print("===========")
        # Split into train/test and convert to tensors.
        train_data, test_data = self.data_spliter(feature_df_standard)
        train_label, test_label = self.data_spliter(label_df_standard)
        # Fit the model
        self.model_train(train_data, train_label)
        # Validate on the held-out tail of the series.
        self.evaluate(test_data, test_label, scaler_label, clean_data["成交价"])

    def data_spliter(self, data, ratio=0.8):
        """Convert ``data`` to a float tensor and split it chronologically.

        Returns ``(train, test)`` where the first ``int(len(data) * ratio)``
        rows form the training part.
        """
        standard_data_tensor = torch.from_numpy(data).float()
        cut = int(len(data) * ratio)
        train_data, test_data = torch.split(standard_data_tensor, [cut, len(data) - cut])
        return train_data, test_data

    def clean(self):
        """Drop NaN rows, zero-price rows, and non-feature columns in place."""
        print("-----数据清洗------")
        print("数据清洗前：", self.data.shape)
        self.data = self.data.dropna(axis=0)
        self.data = self.data[~(self.data["成交价"] == 0)]
        print("完成数据行清洗：", self.data.shape)
        self.data = self.data.drop(labels=["BS",  "filetime", "code"], axis=1)
        print("完成数据列清洗，当前数据", self.data.shape)
        return self.data

    def model_train(self, train_data, train_label):
        """Fit the LSTM with MSE loss and Adam for ``self.num_epochs`` epochs."""
        print("-----开始训练模型------")
        # Reshape to (batch, seq_len = n_features, input_size = 1) for the LSTM.
        train_data = train_data.reshape(-1, train_data.shape[1], 1)
        train_label = train_label.reshape(-1, 1)
        print(train_data.shape)
        self.criterion = nn.MSELoss()
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
        # BUG FIX: a hard-coded ``num_epochs = 5000`` and a local ``criterion``
        # previously shadowed self.num_epochs / self.criterion set above.
        for epoch in range(self.num_epochs):
            outputs = self.model(train_data)
            self.optimizer.zero_grad()
            loss = self.criterion(outputs, train_label)
            loss.backward()
            self.optimizer.step()
            if (epoch + 1) % 10 == 0:
                print(f'Epoch [{epoch + 1}/{self.num_epochs}], Loss: {loss.item():.4f}')

    def predict(self, test_data):
        """Run the model in eval mode without tracking gradients."""
        self.model.eval()
        with torch.no_grad():
            outputs = self.model(test_data)
        return outputs

    def evaluate(self, test_data, test_labels, scaler, original_price):
        """Compute test loss and plot predicted vs. true prices.

        ``scaler`` is the fitted label scaler used to invert standardization;
        ``original_price`` is the un-cleaned price series aligned with the data.
        Returns the scalar MSE loss on the test set.
        """
        self.model.eval()
        test_data = test_data.reshape(-1, test_data.shape[1], 1)
        # BUG FIX: labels must match the (N, 1) model output; a 1-D tensor made
        # MSELoss broadcast (N,) against (N, 1) and compute a wrong loss.
        test_labels = test_labels.reshape(-1, 1)
        if self.criterion is None:
            # evaluate() may be called without a prior model_train().
            self.criterion = nn.MSELoss()
        with torch.no_grad():
            outputs = self.model(test_data)
            loss = self.criterion(outputs, test_labels)

        # Align the true-price tail with the 20% test split.
        original_price = original_price.iloc[int(0.8 * original_price.shape[0]):]
        original_price = original_price.reset_index(drop=True)

        # Undo label standardization, then integrate predicted deltas from the
        # first true price of the test window (labels appear to be price
        # changes — TODO confirm against feature_engineer()).
        outputs = scaler.inverse_transform(outputs)
        predict = original_price.iloc[0] + outputs.cumsum()

        # Visualize predictions against the true series.
        plt.figure(figsize=(10, 6))
        plt.plot(original_price, label='True Values')
        plt.plot(predict, label='Predicted Values')
        plt.title('True vs Predicted Values')
        plt.xlabel('Samples')
        plt.ylabel('Value')
        plt.legend()
        plt.show()
        return loss.item()

def LSTM_rt(data_df, model_name):
    model_path = os.getcwd()