import os
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
from tqdm import tqdm
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_extraction.text import TfidfVectorizer
from SimpleNN import SimpleNN
import joblib
import pickle

# NOTE(review): the data file path is built by plain string concatenation, so
# the file name is appended directly after "aiclock.server" with no "/" —
# confirm this is intentional (os.path.join would insert a separator).
project_path = "/home/szr/.A003_AI_Clock_Server/aiclock.server"
data_file = project_path+"早高峰（驾驶）数据集.xlsx"

df = pd.read_excel(data_file)

# TF-IDF encoding of the route text column (路径), capped at 1000 terms.
vectorizer = TfidfVectorizer(max_features=1000)
path_features = vectorizer.fit_transform(df['路径']).toarray()

# Encode origin (起点) and destination (终点) as integer category codes and
# prepend them to the TF-IDF features.
additional_features = df[['起点', '终点']].astype('category').apply(lambda x: x.cat.codes)
combined_features = np.hstack((additional_features, path_features))

# Scale the target (耗时 = travel time) to [0, 1]; the fitted scaler is saved
# later so predictions can be inverse-transformed at serving time.
scaler = MinMaxScaler()
df['耗时'] = scaler.fit_transform(df[['耗时']])

# Split into features and labels.
X = combined_features
y = df['耗时'].values

lookback = 1
inputs = X
labels = y

# Hold out the last 10% of rows as the test set. An explicit split index is
# used instead of `inputs[:-test_portion]`: when test_portion == 0 (fewer
# than 10 rows), `[:-0]` is `[:0]` and would silently empty the training set.
test_portion = int(0.1 * len(inputs))
split_idx = len(inputs) - test_portion
train_x = inputs[:split_idx]
train_y = labels[:split_idx]
test_x = inputs[split_idx:]
test_y = labels[split_idx:]

batch_size = 32
train_data = TensorDataset(torch.from_numpy(train_x).float(), torch.from_numpy(train_y).float())
train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size, drop_last=True)

# Use the GPU when available, otherwise fall back to CPU.
is_cuda = torch.cuda.is_available()
device = torch.device("cuda" if is_cuda else "cpu")


def train(train_loader, learn_rate, hidden_dim=256, EPOCHS=5):
    """Train a SimpleNN regressor on the batches yielded by `train_loader`.

    Args:
        train_loader: DataLoader yielding (features, target) float batches.
        learn_rate: learning rate for the Adam optimizer.
        hidden_dim: width of the network's hidden layer.
        EPOCHS: number of full passes over the training data.

    Returns:
        The trained model, resident on the module-level `device`.
    """
    # Infer the input width from the first batch; output is a single scalar.
    sample_batch, _ = next(iter(train_loader))
    input_dim = sample_batch.shape[1]
    output_dim = 1

    model = SimpleNN(input_dim, hidden_dim, output_dim)
    model.to(device)

    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learn_rate)

    model.train()
    print("Starting Training of SimpleNN model")
    epoch_times = []

    for epoch in range(1, EPOCHS + 1):
        start_time = time.time()
        running_loss = 0.0
        step = 0
        for features, target in train_loader:
            step += 1
            model.zero_grad()
            prediction = model(features.to(device))
            # Targets are reshaped to (batch, 1) to match the model output.
            loss = criterion(prediction, target.to(device).view(-1, 1))
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if step % 200 == 0:
                print("Epoch {}......Step: {}/{}....... Average Loss for Epoch: {}".format(
                    epoch, step, len(train_loader), running_loss / step))
        elapsed = time.time() - start_time
        print("Epoch {}/{} Done, Total Loss: {}".format(epoch, EPOCHS, running_loss / len(train_loader)))
        print("Total Time Elapsed: {} seconds".format(elapsed))
        epoch_times.append(elapsed)
    print("Total Training Time: {} seconds".format(sum(epoch_times)))
    return model

def evaluate(model, test_x, test_y):
    """Run the model on the test set and report sMAPE.

    Args:
        model: trained torch module mapping (N, D) floats to (N, 1) outputs.
        test_x: numpy array of test features, shape (N, D).
        test_y: numpy array of test targets, shape (N,).

    Returns:
        (outputs, targets, sMAPE): predictions and targets as (N, 1) numpy
        arrays, and the symmetric MAPE as a percentage.
    """
    model.eval()
    # Infer the device from the model's parameters instead of relying on a
    # module-level `device` binding — identical behavior when the model was
    # already moved to `device`, but makes the function self-contained.
    device = next(model.parameters()).device
    inputs = torch.from_numpy(test_x).float().to(device)
    labels = torch.from_numpy(test_y).float().view(-1, 1).to(device)

    with torch.no_grad():
        outputs = model(inputs).cpu().numpy()

    targets = labels.cpu().numpy()
    # sMAPE = mean(|o - t| / ((|t| + |o|) / 2)) * 100.
    # Bug fix: the original computed |o - t| / (|t| + |o|) / 2, i.e. the
    # denominator was multiplied by 2 instead of halved, yielding sMAPE / 4.
    denom = (np.abs(targets) + np.abs(outputs)) / 2
    sMAPE = np.mean(np.abs(outputs - targets) / denom) * 100
    print("sMAPE: {}%".format(sMAPE))

    return outputs, targets, sMAPE

lr = 0.001
model = train(train_loader, lr)

outputs, targets, sMAPE = evaluate(model, test_x, test_y)

# Plot the first 100 predictions against the actual (normalized) times.
plt.figure(figsize=(14, 10))
plt.plot(outputs[:100], "-o", color="g", label="Predicted")
plt.plot(targets[:100], color="b", label="Actual")
plt.ylabel('Time (normalized)')
plt.legend()
plt.show()

# Save the trained model.
# NOTE(review): the file name says "GRU" but the model trained above is a
# SimpleNN — confirm the name is intentional before serving depends on it.
filename = './model_GRU1_早高峰驾驶数据集.pkl'
joblib.dump(model, filename)

# Save the target scaler so served predictions can be inverse-transformed.
scaler_filename = './scaler.pkl'
with open(scaler_filename, 'wb') as f:
    pickle.dump(scaler, f)

# Save the feature column layout (category columns + TF-IDF vocabulary) so
# the server can rebuild the exact same feature matrix.
model_columns = list(df[['起点', '终点']].astype('category').apply(lambda x: x.cat.codes).columns) + list(vectorizer.get_feature_names_out())
columns_filename = './model_columns.pkl'
with open(columns_filename, 'wb') as f:
    pickle.dump(model_columns, f)

# Bug fix: the original printed the literal text "(unknown)" instead of
# interpolating the file name like the two prints below.
print(f'Model saved as {filename}')
print(f'Scaler saved as {scaler_filename}')
print(f'Model columns saved as {columns_filename}')
