import argparse
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import random


# Seed every RNG we use so runs are reproducible.
# (The original seeded np.random twice and re-imported argparse; both
# duplicates are removed here.)
SEED = 2025
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)

# Root directory holding the dataset CSV files.
dir_datas = '/home3/junkang/JJK/tpgn-paper-and-codes/datas_files'

def parse_args(argv=None):
    """Parse command-line options for training and testing.

    Args:
        argv: Optional list of argument strings; defaults to sys.argv[1:]
            (standard argparse behavior). Passing a list makes the function
            usable from tests and notebooks.

    Returns:
        argparse.Namespace with all options below.
    """
    parser = argparse.ArgumentParser(description="Train and test a sequence model")
    parser.add_argument('--data_dir', type=str, default=dir_datas, help='Directory of the dataset')
    parser.add_argument('--fname_csv', type=str, default='weather.csv', help='Name of the dataset file')

    # Fraction of the dataset held out as the test split.
    parser.add_argument('--test_p', type=float, default=0.2, help='Proportion of the dataset to include in the test split')

    # Batch size
    parser.add_argument('--batch_size', type=int, default=32, help='Batch size for training and testing')

    # Model hyper-parameters
    parser.add_argument('--input_size', type=int, default=1, help='Input feature dimension')
    parser.add_argument('--hidden_size', type=int, default=32, help='Size of the hidden layer')
    parser.add_argument('--seq_len', type=int, default=168, help='Sequence length')

    # Number of training epochs
    parser.add_argument('--num_epochs', type=int, default=30, help='Number of training epochs')

    # Learning rate
    parser.add_argument('--lr', type=float, default=0.001, help='Learning rate for the optimizer')

    # Core model type.
    parser.add_argument('--type_core', type=str, default='png', choices=['png', 'lstm', 'rnn'], help='Type of the core model')
    # BUGFIX: the original used type=bool, but argparse applies bool() to the
    # raw string, so any non-empty value -- including "False" -- parsed as
    # True. Parse as a constrained int (0/1) instead; the default (falsy 0)
    # is unchanged.
    parser.add_argument('--is_png_with_posEmb', type=int, default=0, choices=[0, 1], help='Whether to use positional embedding with PGN model (0 or 1)')

    # Compute device
    parser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu', choices=['cuda', 'cpu'], help='Device to use for training and testing')

    return parser.parse_args(argv)

# Parse command-line arguments once at import time; every later block reads
# its configuration from this module-level namespace.
args = parse_args()

# The logistic (sigmoid) function and its inverse.
def sigmoid(x):
    """Logistic function: map any real x (scalar or ndarray) into (0, 1)."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)

def sigmoid_inv(x):
    """Inverse logistic (logit): map x in (0, 1) back to the real line."""
    odds = x / (1 - x)
    return np.log(odds)

import pandas as pd

def get_seq_datas(fname_csv, dir_datas=None, col_target=None):
    """Load the target column(s) of a CSV dataset.

    Args:
        fname_csv: CSV file name, joined onto `dir_datas`.
        dir_datas: Directory containing the CSV. Defaults to args.data_dir,
            resolved lazily at call time (the original bound it at def time).
        col_target: List of column names to keep. Defaults to ['OT'].
            (Was a mutable default argument; replaced with a None sentinel.)

    Returns:
        pandas.DataFrame restricted to `col_target`, with NaN rows dropped.
    """
    if dir_datas is None:
        dir_datas = args.data_dir
    if col_target is None:
        col_target = ['OT']
    fname_csv = os.path.join(dir_datas, fname_csv)
    df = pd.read_csv(fname_csv)
    # Rows with a NaN in *any* column are discarded before selection.
    df = df.dropna()
    df = df[col_target]

    return df

# Load the target series for the configured dataset file (uses args.data_dir).
df = get_seq_datas(args.fname_csv)

# 数据集Dataset准备
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from tqdm import tqdm

class MyDataset(Dataset):
    """Sliding-window dataset over a (z-scored) time-series DataFrame.

    Each item is one window of `seq_len` consecutive normalized rows,
    returned as a float32 tensor of shape (seq_len, n_columns).
    """

    def __init__(self, df, seq_len=168, mean=None, std=None):
        self.df = df
        self.seq_len = seq_len

        # Normalization statistics. When either is None, both are recomputed
        # from `df` (pass the train-set stats in for the test set).
        self.mean = mean
        self.std = std

        self.datas = self.get_seq_datas()

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, idx):
        """Return window `idx` as a float32 tensor."""
        window = self.datas[idx]
        return torch.tensor(window, dtype=torch.float32)

    def get_seq_datas(self):
        """Z-score the frame and slice it into overlapping windows.

        Returns a numpy array of shape (n_windows, seq_len, n_columns).
        """
        # Z-score normalization; stats are computed here unless supplied.
        if self.mean is None or self.std is None:
            self.mean = self.df.mean()
            self.std = self.df.std()
        normed = (self.df - self.mean) / self.std

        windows = [
            normed.iloc[start:start + self.seq_len].values
            for start in tqdm(range(len(normed) - self.seq_len))
        ]
        return np.array(windows)

        
nums_test = int(len(df) * args.test_p)
# Chronological split: the last test_p fraction of rows is the test set.
train_df = df.iloc[:-nums_test]
test_df = df.iloc[-nums_test:]

# Train-set normalization statistics are reused for the test set
# (avoids leaking test statistics into training).
dataset_train = MyDataset(train_df, args.seq_len)
mean, std = dataset_train.mean, dataset_train.std
dataset_test = MyDataset(test_df, args.seq_len, mean=mean, std=std)
dataloader_train = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True)
dataloader_test = DataLoader(dataset_test, batch_size=args.batch_size, shuffle=False)

print(f'Train dataset: {len(dataset_train)} samples')
print(f'Test dataset: {len(dataset_test)} samples')

# Peek at one batch to sanity-check the tensor shape.
for i, data in enumerate(dataloader_train):
    # Expected: (batch_size, seq_len, n_features)
    print(data.shape)
    break


# ### 模型定义

class PGN_native(nn.Module):
    """Parallel Gated Network core (no positional embedding).

    A wide causal convolution computes one hidden state per time step from
    the window of preceding inputs; a 1x1 convolution over [input; hidden]
    then yields a gate and a candidate state, which are blended into the
    output.
    """

    def __init__(self, input_size=1, hidden_size=32, seq_len=168):
        """
        Args:
            input_size: The number of expected features in the input `x`.
            hidden_size: The number of features in the hidden state `h`.
            seq_len: Full sequence length; the causal window spans seq_len - 1 steps.
        """
        super(PGN_native, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.window_size = seq_len - 1

        # Wide convolution: kernel covers the whole causal window, so each
        # output position sees the window_size inputs preceding it.
        self.hidden_MLP = nn.Conv1d(
            in_channels=input_size,
            out_channels=hidden_size,
            kernel_size=self.window_size,
            stride=1,
            bias=True)

        # 1x1 convolution over the concatenated [x_t; H_t]. Its 2*hidden_size
        # output channels are split into the gate G and the candidate H_hat.
        self.gate = nn.Conv1d(
            in_channels=input_size + hidden_size,
            out_channels=2 * hidden_size,
            kernel_size=1,
            stride=1, bias=True)

        # Most recent gate values, kept for post-hoc analysis.
        self.gate_values = None

    def forward(self, X):
        """
        Args:
            X: A tensor of shape `(B, L, input_size)`.

        Returns:
            (Out, G): output states and gate values, each `(B, L, hidden_size)`.
        """
        # Channels-first view, left-padded with window_size zero frames:
        # (B, input_size, window_size + L).
        x_cf = X.permute(0, 2, 1)
        padded = F.pad(x_cf, (self.window_size, 0))
        # Drop the final step so H_t depends only on inputs strictly before
        # t, then convolve: H is (B, L, hidden_size).
        H = self.hidden_MLP(padded[:, :, :-1]).permute(0, 2, 1)
        # Gate G and candidate H_hat, each (B, L, hidden_size).
        fused = torch.cat([X, H], dim=-1).permute(0, 2, 1)
        gate_raw = self.gate(fused).permute(0, 2, 1)
        G, H_hat = torch.split(gate_raw, self.hidden_size, dim=-1)
        G = torch.sigmoid(G)
        H_hat = torch.tanh(H_hat)
        # Blend: gated mix of the hidden state and the candidate state.
        Out = G * H + (1 - G) * H_hat

        self.gate_values = G  # saved for later analysis
        return Out, G



class PGN_withPosEmb(nn.Module):
    """PGN core with a learned positional embedding added to the inputs.

    Identical to PGN_native after the embedding step: a wide causal
    convolution builds per-step hidden states, and a 1x1 convolution
    produces a gate and a candidate state that are blended into the output.
    """

    def __init__(self, input_size = 1, hidden_size = 32, seq_len = 168):
        """
        Args:
            input_size: The number of expected features in the input `x`.
            hidden_size: The number of features in the hidden state `h`.
            seq_len: Maximum sequence length (size of the position table).
        """
        super(PGN_withPosEmb, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.window_size = seq_len - 1
        self.seq_len = seq_len

        # One learnable vector per position, sized input_size so it can be
        # added directly onto the inputs.
        self.posEmb = nn.Embedding(seq_len, input_size)

        # Wide causal convolution producing one hidden state per step.
        self.hidden_MLP = nn.Conv1d(
            in_channels = input_size,
            out_channels = hidden_size,
            kernel_size = self.window_size,
            stride = 1,
            bias=True)

        # 1x1 convolution over [x_t; H_t]; the 2*hidden_size output channels
        # split into the gate G and the candidate state H_hat.
        self.gate = nn.Conv1d(
            in_channels = input_size + hidden_size,
            out_channels = 2 * hidden_size,
            kernel_size = 1,
            stride = 1, bias=True)

        self.gate_values = None  # last gate values, kept for analysis

    def forward(self, X):
        """
        Args:
            X: A tensor of shape `(B, L, input_size)` with L <= seq_len.

        Returns:
            (Out, G): output states and gate values, each `(B, L, hidden_size)`.
        """
        # Positional embedding. BUGFIX: index positions by the *actual*
        # input length instead of self.seq_len — the training loop feeds
        # sequences of length seq_len - 1 (next-step prediction), and a
        # fixed (B, seq_len, input_size) embedding cannot be broadcast-added
        # to a (B, seq_len - 1, input_size) input.
        L = X.shape[1]
        pos_idx = torch.arange(L, device=X.device).unsqueeze(0)  # (1, L)
        pos_idx = pos_idx.expand(X.shape[0], -1)  # (B, L)
        X = X + self.posEmb(pos_idx)  # (B, L, input_size)

        # Left-pad the time axis with window_size zero frames:
        # (B, window_size + L, input_size).
        padding = torch.zeros(X.shape[0], self.window_size, X.shape[2]).to(X.device)
        padding_X = torch.cat([padding, X], dim=1)
        # Hidden states H: (B, L, hidden_size). The final padded step is
        # dropped so H_t only depends on inputs strictly before t.
        padding_X = padding_X.permute(0, 2, 1)  # (B, input_size, window_size + L)
        H = self.hidden_MLP(padding_X[:, :, :-1]).permute(0, 2, 1)
        # Gate G and candidate H_hat: each (B, L, hidden_size).
        gate_status = self.gate( torch.cat([X, H], dim=-1).permute(0, 2, 1) )
        G, H_hat = torch.split(gate_status.permute(0, 2, 1), self.hidden_size, dim=-1)
        G = torch.sigmoid(G)
        H_hat = torch.tanh(H_hat)
        # Blend hidden and candidate states through the gate.
        Out = G * H + (1 - G) * H_hat

        self.gate_values = G  # saved for later analysis
        return Out, G


class SeqModel(nn.Module):
    """Next-step sequence regressor: a recurrent/gated core plus a linear head.

    The core maps (B, L, input_size) to per-step hidden states; a shared
    linear layer projects each hidden state to a scalar prediction.
    """

    def __init__(self, input_size=1, hidden_size=32, seq_len=168, type_core='png', is_png_with_posEmb=False):
        super(SeqModel, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.seq_len = seq_len
        self.type_core = type_core

        # Core selection: 'png' picks one of the PGN variants, 'lstm' an
        # LSTM; anything else falls back to a vanilla RNN.
        if type_core == 'png':
            core_cls = PGN_withPosEmb if is_png_with_posEmb else PGN_native
            self.core = core_cls(input_size, hidden_size, seq_len)
        elif type_core == 'lstm':
            self.core = nn.LSTM(input_size, hidden_size, batch_first=True)
        else:
            self.core = nn.RNN(input_size, hidden_size, batch_first=True)

        self.out_layers = nn.Linear(hidden_size, 1)

    def forward(self, X):
        """
        Args:
            X: A tensor of shape `(B, seq_len, input_size)`.

        Returns:
            Per-step predictions of shape `(B, seq_len, 1)`.
        """
        hidden, _ = self.core(X)
        return self.out_layers(hidden)

# ### 模型训练过程


# Model training procedure
def train_model(model, dataloader, criterion, optimizer, num_epochs=10, device='cuda'):
    """Train `model` to predict each next step of the input sequences.

    Args:
        model: Module mapping (B, L, F) inputs to (B, L, 1) predictions.
        dataloader: Yields batches of shape (B, seq_len, F).
        criterion: The loss function.
        optimizer: The optimizer updating `model`'s parameters.
        num_epochs: The number of epochs to train.
        device: Device the batches are moved to before the forward pass.
    """
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        print(f'Epoch [{epoch+1}/{num_epochs}]')
        for step, batch in tqdm(enumerate(dataloader), desc='Training ...'):
            batch = batch.to(device)

            optimizer.zero_grad()

            # Inputs are steps 0..L-2; targets are the same steps shifted by
            # one (1..L-1), i.e. next-step prediction.
            preds = model(batch[:, :-1, :])
            loss = criterion(preds, batch[:, 1:, :])
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {running_loss/len(dataloader):.4f}')

def test_model(model, dataloader, criterion1, criterion2, device='cuda'):
    """Evaluate next-step prediction under two metrics.

    Args:
        model: Module mapping (B, L, F) inputs to (B, L, 1) predictions.
        dataloader: Yields batches of shape (B, seq_len, F).
        criterion1: Primary loss (e.g. MSE).
        criterion2: Secondary loss (e.g. MAE).
        device: Device the batches are moved to.

    Returns:
        Two single-element lists holding the dataloader-averaged losses
        (kept as lists for compatibility with the original interface).
    """
    model.eval()
    loss1_tested = []
    loss2_tested = []
    with torch.no_grad():
        total1 = 0.0
        total2 = 0.0
        for batch in tqdm(dataloader, desc='Testing ...'):
            batch = batch.to(device)
            preds = model(batch[:, :-1, :])
            # Accumulate both losses against the one-step-shifted targets.
            total1 += criterion1(preds, batch[:, 1:, :]).item()
            total2 += criterion2(preds, batch[:, 1:, :]).item()

        # Average over batches.
        loss1_tested.append(total1 / len(dataloader))
        loss2_tested.append(total2 / len(dataloader))
    print(f'Test Loss: {loss1_tested[-1]:.4f}, Test Loss2: {loss2_tested[-1]:.4f}')
    return loss1_tested, loss2_tested



device = args.device
# Infer the feature dimension from one sample of a training batch.
input_size = next(iter(dataloader_train))[0].shape[-1]

model = SeqModel(input_size, args.hidden_size, args.seq_len, type_core = args.type_core, is_png_with_posEmb = args.is_png_with_posEmb)
print(model)
model = model.to(device)
# Loss function and optimizer.
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

print(f'Training dataset {args.fname_csv} with {args.type_core} model...')
print('---' * 20)
# Train the model.
train_model(model, dataloader_train, criterion, optimizer, num_epochs=args.num_epochs, device=device)


# Evaluate: MSE is the primary metric, MAE (L1 loss) the secondary.
mse, mae = test_model(model, dataloader_test, criterion, nn.L1Loss(), device=device)
print('MSE:', mse)
print('MAE:', mae)