import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dropout, Dense, LSTM
import matplotlib.pyplot as plt
import os
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error
import math
import shutil
# Absolute path of the directory containing this script; every data and
# output path below is resolved against it so the script works from any CWD.
script_dir = os.path.dirname(os.path.abspath(__file__))
print(f"脚本目录: {script_dir}")

# Force CPU execution by hiding all CUDA devices from TensorFlow.
# NOTE(review): this is set *after* `import tensorflow`; it usually still
# works because TF initializes CUDA lazily, but setting it before the
# import is the safer convention — confirm on the target setup.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

print(f"NumPy version: {np.__version__}")
print(f"TensorFlow version: {tf.__version__}")
print(f"GPU available: {tf.config.list_physical_devices('GPU')}")

# Sanity check that the np.object_ attribute exists on this NumPy version.
print("np.object_ check:", hasattr(np, 'object_'))

# Load the dataset (presumably daily quotes for ticker SH600519 — Kweichow
# Moutai) via an absolute path.
data_path = os.path.join(script_dir, 'SH600519.csv')
print(f"数据路径: {data_path}")
maotai = pd.read_csv(data_path)

# Chronological split — assumes the CSV has 2426 data rows (TODO confirm):
# first 2426-300=2126 days of opening prices train, last 300 days test.
# iloc column slice 2:3 is half-open [2, 3), i.e. the opening-price column.
training_set = maotai.iloc[0:2426 - 300, 2:3].values  # 前(2426-300=2126)天的开盘价作为训练集
test_set = maotai.iloc[2426 - 300:, 2:3].values  # 后300天的开盘价作为测试集

# Normalization: fit the scaler on the training set only, then apply the
# same min/max transform to the test set — prevents test-set leakage.
sc = MinMaxScaler(feature_range=(0, 1))  # scale features into the (0, 1) range
training_set_scaled = sc.fit_transform(training_set)  # learn min/max from training data and scale it
test_set = sc.transform(test_set)  # scale the test set with the training set's statistics
#----------------------------
#----------------------------

# Build sliding-window training samples: each input is the 60 previous
# scaled opening prices, each target is the price that follows them.
window = 60
x_train = [training_set_scaled[i - window:i, 0]
           for i in range(window, len(training_set_scaled))]
y_train = [training_set_scaled[i, 0]
           for i in range(window, len(training_set_scaled))]

# Shuffle samples and targets with one shared permutation (fixed seed
# keeps the ordering reproducible across runs).
np.random.seed(7)
permutation = np.arange(len(x_train))
np.random.shuffle(permutation)
x_train = np.array(x_train)[permutation]
y_train = np.array(y_train)[permutation]
# LSTM layers expect input shaped (samples, timesteps, features).
x_train = np.reshape(x_train, (x_train.shape[0], 60, 1))

# Build test samples with the same 60-step sliding window, kept in
# chronological order (no shuffling, so predictions can be plotted
# against the real price series).
x_test = [test_set[j - 60:j, 0] for j in range(60, len(test_set))]
y_test = [test_set[j, 0] for j in range(60, len(test_set))]
x_test = np.array(x_test)
y_test = np.array(y_test)
# Reshape to (samples, timesteps, features) for the LSTM input.
x_test = np.reshape(x_test, (x_test.shape[0], 60, 1))
# Model: two stacked LSTM layers with dropout regularization, ending in a
# single linear unit that predicts the next scaled opening price.
model = tf.keras.Sequential()
model.add(LSTM(80, return_sequences=True))  # emit the full sequence for the next LSTM
model.add(Dropout(0.2))
model.add(LSTM(100))  # final LSTM returns only its last hidden state
model.add(Dropout(0.2))
model.add(Dense(1))

# Regression task: optimize MSE with Adam. Only loss is tracked — accuracy
# is meaningless here, so no metrics are configured.
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss='mean_squared_error')

# Save checkpoints directly in the script directory (absolute path).
checkpoint_save_path = os.path.join(script_dir, "LSTM_stock.ckpt")
print(f"使用脚本目录作为检查点路径: {checkpoint_save_path}")

# Ensure the checkpoint directory exists. makedirs(exist_ok=True) is
# idempotent, so the former os.path.exists() pre-check was redundant and
# race-prone — removed.
checkpoint_dir = os.path.dirname(checkpoint_save_path)
os.makedirs(checkpoint_dir, exist_ok=True)

# Checkpoint callback: keep only the weights with the best (lowest)
# validation loss seen during training.
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_save_path,
    save_weights_only=True,
    save_best_only=True,
    monitor='val_loss'
)

# The TF checkpoint format writes an accompanying .index file; its presence
# means previously saved weights exist and can be restored to resume.
if os.path.exists(checkpoint_save_path + '.index'):
    print('-------------load the model（加载模型）-----------------')
    model.load_weights(checkpoint_save_path)


# Train the model; the callback checkpoints the best weights by val_loss.
# If training with the callback fails (e.g. an unwritable checkpoint path),
# fall back to training without callbacks rather than aborting — this broad
# except is deliberate best-effort behavior, not error suppression.
print("开始训练模型...")
try:
    history = model.fit(
        x_train, y_train,
        batch_size=64,
        epochs=50,
        validation_data=(x_test, y_test),
        validation_freq=1,  # validate after every epoch
        callbacks=[cp_callback]
    )
except Exception as e:
    print(f"训练过程中发生错误: {e}")
    print("尝试不使用回调函数训练...")
    # NOTE(review): this retry continues from whatever weights the failed
    # fit left behind, and nothing is checkpointed to disk on this path.
    history = model.fit(
        x_train, y_train,
        batch_size=64,
        epochs=50,
        validation_data=(x_test, y_test),
        validation_freq=1
    )

model.summary()

# Dump every trainable variable (name, shape, values) to a text file next
# to the script, for offline inspection.
weights_file = os.path.join(script_dir, 'weights.txt')
with open(weights_file, 'w') as file:
    for v in model.trainable_variables:
        file.write(f"{v.name}\n{v.shape}\n{v.numpy()}\n\n")
print(f"权重已保存到: {weights_file}")

# Matplotlib setup for Chinese text: use the SimHei system font and keep
# the minus sign rendering correctly alongside it.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
print("使用系统黑体显示中文")

# Plot training loss against validation loss per epoch.
train_loss_curve = history.history['loss']
val_loss_curve = history.history['val_loss']
plt.figure(figsize=(10, 6))
plt.plot(train_loss_curve, label='训练损失')
plt.plot(val_loss_curve, label='验证损失')
plt.title('训练和验证损失')
plt.xlabel('迭代次数')
plt.ylabel('损失值')
plt.legend()

# Persist the figure (absolute path) before showing it interactively.
loss_image = os.path.join(script_dir, 'loss_curve.png')
plt.savefig(loss_image)
print(f"损失曲线图已保存到: {loss_image}")
plt.show()

# Predict on the test windows, then map predictions and ground truth back
# to the original price scale with the scaler fitted on the training data.
predicted_stock_price = sc.inverse_transform(model.predict(x_test))
# The first 60 rows of test_set were consumed as history for the first
# window, so the comparable real prices start at index 60.
real_stock_price = sc.inverse_transform(test_set[60:])

# Plot real vs predicted prices over the test period.
plt.figure(figsize=(12, 6))
plt.plot(real_stock_price, color='red', label='茅台股票价格')
plt.plot(predicted_stock_price, color='blue', label='预测的茅台股票价格')
plt.title('茅台股票价格预测')
plt.xlabel('时间')
plt.ylabel('价格')
plt.legend()

# Persist the figure (absolute path) before showing it interactively.
prediction_image = os.path.join(script_dir, 'stock_prediction_LSTM.png')
plt.savefig(prediction_image)
print(f"股票预测图已保存到: {prediction_image}")
plt.show()


##########evaluate##############
# MSE  = E[(pred - true)^2]      mean squared error
# RMSE = sqrt(MSE)               root mean squared error
# MAE  = E[|pred - true|]        mean absolute error
# Arguments follow sklearn's (y_true, y_pred) convention; both metrics are
# symmetric, so the reported values are unchanged.
mse = mean_squared_error(real_stock_price, predicted_stock_price)
rmse = math.sqrt(mse)  # reuse mse instead of recomputing mean_squared_error
mae = mean_absolute_error(real_stock_price, predicted_stock_price)
print('均方误差: %.6f' % mse)
print('均方根误差: %.6f' % rmse)
print('平均绝对误差: %.6f' % mae)
# BUGFIX: the original cleanup guard "'tempfile' in locals()" was always
# False (tempfile is never bound anywhere in this script), and had it ever
# been True, shutil.rmtree(checkpoint_dir) would have deleted the script
# directory itself, since the checkpoint lives alongside the script. The
# checkpoint is kept deliberately so training can resume — the dangerous
# dead cleanup code is removed.