import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.optim as optim
from util import Utils
import matplotlib.pyplot as plt
import torch.nn.functional as F

import random

# Use a serif font for all matplotlib figures.
plt.rcParams['font.family'] = 'Times New Roman'

# Fix every RNG source so runs are reproducible.
seed = 128
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)

# Load and preprocess the dataset via the project-local helper.
# NOTE(review): process_missing_values() takes no arguments here, so it
# presumably loads the raw data internally — confirm against util.py.
utils = Utils()
df_processed = utils.process_missing_values()
df_encoded = utils.process_label_encoder(df_processed)

df_encoded['datetime'] = pd.to_datetime(df_encoded['datetime'])
df = df_encoded.set_index('datetime')

# Per-column min/max used for min-max scaling to [0, 1].
data_min = np.min(df.values, axis=0)
data_max = np.max(df.values, axis=0)
# Normalize the data.
# NOTE(review): a constant column (max == min) would divide by zero here.
scaled_data = (df.values - data_min) / (data_max - data_min)


# Inverse of the min-max scaling, for a single feature column.
def inverse_normalize(data, column=2):
    """Map min-max-scaled values back to the original units of one column.

    Args:
        data: scaled values in [0, 1] (scalar, ndarray, or tensor).
        column: index of the feature whose min/max to use; defaults to 2
            (the target column used throughout this script), so existing
            calls are unchanged.

    Returns:
        ``data`` rescaled to the original range of the chosen column.
    """
    # Relies on the module-level data_min/data_max computed during preprocessing.
    return data * (data_max[column] - data_min[column]) + data_min[column]


# Build supervised (window, next-step target) pairs from sequential data.
def create_sliding_window(data, window_size, target_col=2):
    """Slice ``data`` into overlapping windows with a one-step-ahead target.

    Args:
        data: time-ordered sequence of feature rows (rows indexable by
            feature index).
        window_size: number of consecutive time steps per input window.
        target_col: feature index used as the prediction target; defaults
            to 2 to preserve the original hard-coded behavior.

    Returns:
        List of ``(window, target)`` tuples, one per valid start position
        (empty when ``len(data) <= window_size``).
    """
    window_data = []
    for start in range(len(data) - window_size):
        window = data[start:start + window_size]
        # Target is the chosen feature at the step right after the window.
        target = data[start + window_size][target_col]
        window_data.append((window, target))
    return window_data


window_size = 24  # time steps per input window (one day if hourly — TODO confirm sampling rate)
windowed_data = create_sliding_window(scaled_data, window_size)

# Chronological train/test split (shuffle=False preserves time order,
# which is required for a fair time-series evaluation).
train_data, test_data = train_test_split(windowed_data, test_size=0.2, shuffle=False)

# Convert train_data to numpy arrays.
train_inputs = np.array([data[0] for data in train_data])
train_targets = np.array([data[1] for data in train_data])

# Convert test_data to numpy arrays.
test_inputs = np.array([data[0] for data in test_data])
test_targets = np.array([data[1] for data in test_data])

# Convert the numpy arrays to Tensors.
train_inputs = torch.tensor(train_inputs)
train_targets = torch.tensor(train_targets)
test_inputs = torch.tensor(test_inputs)
test_targets = torch.tensor(test_targets)

# Reshape inputs to (num_windows, window_size, num_features).
# NOTE(review): the stacked arrays already have this shape, so both
# view() calls below are no-ops kept for safety.
train_inputs = train_inputs.view(-1, window_size, train_inputs.size(2))

test_inputs = test_inputs.view(-1, window_size, test_inputs.size(2))

train_inputs = train_inputs.float()
train_targets = train_targets.float()
test_inputs = test_inputs.float()
test_targets = test_targets.float()

# Batch size — currently unused: the training loop below runs full-batch.
batch_size = 128


class CNNLSTMModel(nn.Module):
    """1-D convolutional feature extractor followed by an LSTM and a linear head.

    Input shape: (batch, seq_len, input_size); output shape:
    (batch, output_size), taken from the LSTM state at the last time step.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(CNNLSTMModel, self).__init__()
        self.hidden_size = hidden_size
        # Layers are registered in the same order as always so that
        # parameter initialization under a fixed seed is unchanged.
        self.conv1 = nn.Conv1d(input_size, hidden_size, kernel_size=3, padding=1)
        self.lstm = nn.LSTM(hidden_size, hidden_size)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Conv1d expects (batch, channels, seq_len).
        features = torch.relu(self.conv1(x.transpose(1, 2)))
        # The LSTM (batch_first=False) expects (seq_len, batch, features).
        sequence = features.permute(2, 0, 1)
        lstm_out, _ = self.lstm(sequence)
        # Project only the final time step down to the output size.
        return self.fc(lstm_out[-1])




input_size = train_inputs.size(2)  # number of input features per time step
hidden_size = 64  # hidden dimension shared by the conv and LSTM layers
output_size = 1  # single regression target (scaled temperature)

model = CNNLSTMModel(input_size, hidden_size, output_size)
criterion = nn.MSELoss()  # loss function
optimizer = optim.Adam(model.parameters(), lr=0.001)  # optimizer

num_epochs = 100  # number of training epochs
loss_list = []
# Training loop. NOTE(review): this trains on the FULL training set every
# epoch — the mini-batch loop below is commented out and `batch_size` is
# never used.
for epoch in range(num_epochs):
    model.train()  # switch to training mode
    optimizer.zero_grad()  # reset accumulated gradients
    # # Mini-batch version, currently disabled:
    # for i in range(0, len(train_inputs), batch_size):
    #     inputs = train_inputs[i:i + batch_size]
    #     targets = train_targets[i:i + batch_size]
    # Forward pass
    outputs = model(train_inputs)
    loss = criterion(outputs.squeeze(), train_targets)

    # Backward pass and parameter update
    loss.backward()
    optimizer.step()
    print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))
    loss_list.append(loss.item())

# Evaluation mode.
model.eval()

# Run both splits through the trained model. torch.no_grad() skips
# autograd-graph construction entirely (the original `.detach()` still
# paid for building the graph during the forward pass).
with torch.no_grad():
    # Predictions on the training set.
    train_predictions = model(train_inputs).numpy()
    # Predictions on the test set.
    test_predictions = model(test_inputs).numpy()

# Report MSE on the normalized scale for both splits.
train_loss = criterion(torch.tensor(train_predictions.squeeze()), train_targets).item()
test_loss = criterion(torch.tensor(test_predictions.squeeze()), test_targets).item()

print('Train Loss: {:.4f}'.format(train_loss))
print('Test Loss: {:.4f}'.format(test_loss))

# Predict on the test set and map the results back to original units.
# (The duplicate model.eval() that was here is unnecessary — the model is
# already in eval mode from the evaluation section above.)
with torch.no_grad():
    test_predictions = model(test_inputs).numpy()
test_predictions = inverse_normalize(test_predictions)
# De-normalize the ground-truth targets as well.
test_targets = inverse_normalize(test_targets)
# Plot predictions against the true values.
plt.figure(figsize=(10, 6))
plt.plot(test_targets, label='True')
plt.plot(test_predictions, label='Predicted')
plt.xlabel('Time')
plt.ylabel('Temperature')
plt.legend()
plt.savefig('cnn_lstm_true_vs_predict.jpg')
plt.show()

# Plot the training-loss curve.
plt.plot(loss_list)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training Loss')
plt.savefig('cnn_lstm_loss.jpg')
plt.show()
