
import pandas as pd
import numpy as np
import random
import os
import torch
import torch.nn as nn
import torch.optim as optim


from sklearn.preprocessing import StandardScaler
from torch.utils.data import TensorDataset, DataLoader

pd.set_option('display.max_columns', 1000)
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 1000)

# Load the raw monitoring data (big5-encoded CSV).
data = pd.read_csv('../dataset/train.csv', encoding='big5')

# 'NR' means "no rain" in the RAINFALL rows — treat it as numeric zero.
data = data.replace('NR', 0)

# Drop the three leading metadata columns (date / station / item name),
# keeping only the 24 hourly measurement columns.
numpy_data = data.iloc[:, 3:].to_numpy()

# Each day occupies 18 consecutive rows (one row per pollutant/feature),
# so slice the table into per-day (18, 24) chunks.
datas = [numpy_data[start:start + 18, :] for start in range(0, data.shape[0], 18)]

# Stack into a (days, 18, 24) float array; float32 matches the model dtype.
datas_array = np.array(datas, dtype=np.float32)

# Glue the days together hour-by-hour: concatenating the (18, 24) chunks
# along the hour axis yields (18, days*24); transposing gives one row per
# hour with the 18 features as columns, in chronological order.
train_data = pd.DataFrame(np.concatenate(datas_array, axis=1).T)
# print(train_data)

#特征和标签提取
x = train_data.drop(9,axis=1)
# print(x)
y = train_data[9]
# print(y)
# print(len(x[0]))
#划分训练集和测试集
split_idx = int(0.8 * len(train_data))
x_train = x.iloc[:split_idx, :]  # 前80%的行，所有列
x_test = x.iloc[split_idx:, :]   # 后20%的行，所有列
y_train = y.iloc[:split_idx]     # 前80%的标签
y_test = y.iloc[split_idx:]      # 后20%的标签

#数据标准化，防止模型误认为某个参数权重最大
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(x_train)
X_test_scaled = scaler.transform(x_test)

#将数据张量化，支持自求导
x_train_tensor = torch.tensor(X_train_scaled, dtype=torch.float32)
x_test_tensor = torch.tensor(X_test_scaled, dtype=torch.float32)
y_train_tensor = torch.tensor(y_train.values, dtype=torch.float32).view(-1, 1)
y_test_tensor = torch.tensor(y_test.values, dtype=torch.float32).view(-1, 1)
# print(x_train_tensor)
#数据量太大，创建加载器
train_dataset = TensorDataset(x_train_tensor, y_train_tensor)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)

#定义一个DNN模型
class DNN(nn.Module):
    """Fully connected regression network: three ReLU hidden layers and a
    linear output head.

    Args:
        input_size: number of input features.
        hidden_size: width of each of the three hidden layers.
        output_size: number of outputs (1 for scalar regression).
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(DNN, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, output_size)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Pass through the three hidden layers with ReLU activations...
        hidden = x
        for layer in (self.fc1, self.fc2, self.fc3):
            hidden = self.relu(layer(hidden))
        # ...and finish with a plain linear layer (no activation on the output,
        # as is standard for regression).
        return self.fc4(hidden)

# Hyperparameters: hidden width 60, a single regression output, and the
# input width taken directly from the prepared training tensor.
n_features = x_train_tensor.shape[1]
model = DNN(n_features, 60, 1)

# MSE loss for regression, optimized with Adam.
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# Number of full passes over the training set.
num_epochs = 500

for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    for batch_x, batch_y in train_loader:
        # Standard step: clear gradients, forward, backward, update.
        optimizer.zero_grad()
        loss = criterion(model(batch_x), batch_y)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

    # Report the mean per-batch loss every 10 epochs.
    if (epoch + 1) % 10 == 0:
        avg_loss = running_loss / len(train_loader)
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {avg_loss:.4f}')


# Final evaluation on the held-out 20% of hours.
# Fix: switch to eval mode BEFORE disabling gradient tracking (the
# conventional order — eval() toggles layer behavior, no_grad() only
# suppresses autograd), and format loss.item() (a Python float) rather
# than relying on the 0-dim tensor's __format__. Printed output unchanged.
model.eval()
with torch.no_grad():
    values = model(x_test_tensor)       # predictions for the test hours
    loss = criterion(values, y_test_tensor)
    print(f'Test Loss: {loss.item():.4f}')

