# Baseline LSTM built to match the comparison model from
# https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9145522
# (not verified to be byte-identical to the paper's model, but the layer
# count matches exactly).
import os
import torch
import time
import torch.nn as nn
import pandas as pd
import multiprocessing
import numpy as np
from torch.utils.data import Dataset, DataLoader, Subset
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from torch.utils.tensorboard import SummaryWriter
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from math import ceil
#from loguru import logger
from tqdm import tqdm

import mpld3
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
import matplotlib
#matplotlib.rcParams['animation.ffmpeg_path'] = r'D:/deeplearning/anaconda3/Library/bin/ffmpeg.exe'

from AircraftDataset_new import AircraftDataset_new
from testandtrainfunction import Train_and_Test_each_epoch

import torch.multiprocessing as mp
mp.set_start_method('spawn', force=True)

# ---- Device / environment configuration ----
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batchsize = 128

train_dir = r'.../hangji/data/train'   # e.g. r"D:\CC\new\hangji\train"
test_dir = r'.../hangji/data/test'     # e.g. r"D:\CC\new\hangji\test"
writer = SummaryWriter('logs')         # TensorBoard event files go to ./logs
plot_3d_root = r'.../hangji/77figure'  # output folder for 3D trajectory plots

# TODO(review): consider num_workers=4 in the DataLoaders below.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# BUG FIX: the variable name is CUDA_VISIBLE_DEVICES (plural); the original
# 'CUDA_VISIBLE_DEVICE' was silently ignored by CUDA.
# NOTE(review): setting this after torch.cuda has already been queried above
# may have no effect — confirm it is honored, or set it before importing torch.
os.environ['CUDA_VISIBLE_DEVICES'] = "0,1,2,3"
device_ids = [0, 1, 2, 3]  # GPU ids for the (commented-out) DataParallel wrapper

# Serialize CUDA kernel launches so errors surface at the faulting call
# (debugging aid; slows training).
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'


# ---- Dataset creation / caching ----
# Processed datasets are cached as .pt files so later runs skip the
# (slow) AircraftDataset_new preprocessing and load directly from disk.
data_dir = r'.../hangji/dataset'

train_datapath = os.path.join(data_dir, 'processed_train_dataset.pt')
if not os.path.exists(train_datapath):
    train_dataset = AircraftDataset_new(data_folder=train_dir, sequence_length=10, train=True)
    torch.save(train_dataset, train_datapath)
else:
    print("start load train dataset")
    # NOTE(review): this unpickles a whole Dataset object; torch>=2.6 defaults
    # to weights_only=True and will refuse it — pass weights_only=False there,
    # and only ever load trusted files.
    train_dataset = torch.load(train_datapath)
    print("train dataset load success")
# BUG FIX: pin_memory only accelerates host->GPU copies, so it should be
# enabled when the target device is CUDA (the original condition tested 'cpu',
# pinning memory exactly when it is useless).
train_loader = DataLoader(train_dataset, batch_size=batchsize, shuffle=True,
                          pin_memory=(device.type == 'cuda'))

# Same caching scheme for the test set.
test_datapath = os.path.join(data_dir, 'processed_test_dataset.pt')
if not os.path.exists(test_datapath):
    test_dataset = AircraftDataset_new(data_folder=test_dir, sequence_length=10, train=False)
    torch.save(test_dataset, test_datapath)
else:
    print("start load test dataset")
    test_dataset = torch.load(test_datapath)
    print("test dataset load success")
test_loader = DataLoader(test_dataset, batch_size=batchsize, shuffle=False,
                         pin_memory=(device.type == 'cuda'))

class LSTM(nn.Module):
    """Two stacked LSTM blocks with dropout, followed by a linear head.

    Intended to mirror the baseline LSTM of the reference paper
    (https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9145522).
    Note that each nn.LSTM is itself `num_layers` deep, so the total
    recurrent depth is 2 * num_layers — presumably intentional to match
    the paper; TODO confirm.
    """

    def __init__(self, input_dim, output_dim, hidden_dim, num_layers, dropout_rate):
        super(LSTM, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        # Submodules are created in this exact order so parameter
        # initialization stays reproducible under a fixed RNG seed.
        self.lstm1 = nn.LSTM(input_dim, hidden_dim, num_layers=num_layers, batch_first=True)
        self.dropout1 = nn.Dropout(dropout_rate)
        self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, num_layers=num_layers, batch_first=True)
        self.dropout2 = nn.Dropout(dropout_rate)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        """Map x of shape (batch, seq_len, input_dim) to (batch, output_dim)."""
        out, _ = self.lstm1(x)
        out = self.dropout1(out)
        out, _ = self.lstm2(out)
        out = self.dropout2(out)
        # Only the final time step's hidden state feeds the prediction head.
        last_step = out[:, -1, :]
        return self.fc(last_step)


if __name__ == '__main__':
    # Required when frozen into a Windows executable; a no-op otherwise.
    # (Uses the module-level `import multiprocessing`; the redundant local
    # re-import was removed.)
    multiprocessing.freeze_support()

    # ---- Hyperparameters ----
    sequence_length = 10
    input_dim = 5     # number of input features per time step
                      # (original comment said lon/lat/alt, yet dim is 5 — TODO confirm)
    hidden_dim = 50
    num_layers = 2
    output_dim = 3    # predicted targets; adjust according to the task
    dropout_rate = 0.3
    learning_rate = 0.001
    num_epochs = 300
    patience = 15     # early-stopping patience, in epochs

    #model = nn.DataParallel(model, device_ids=device_ids)
    model = LSTM(input_dim, output_dim, hidden_dim, num_layers, dropout_rate)
    model.to(device)

    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    Train_and_Test_each_epoch(device, model, train_dataset, test_dataset,
                              train_loader, test_loader, criterion, optimizer,
                              num_epochs=num_epochs, patience=patience)
    # Persist the final weights (the save directory must already exist).
    torch.save(model.state_dict(), r'/disk/user/lyf/hangji/model/modelLSTM1.pth')