import pandas as pd
import torch
import os
import numpy as np
from sklearn import preprocessing
from joblib import dump
from models import *
from utils.ModelUtils import *
from sklearn.metrics import mean_absolute_percentage_error, mean_absolute_error, mean_squared_error, r2_score
from torch.utils.data import DataLoader, TensorDataset
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence


def correct_abnormal_values(features_, abnormal_indices):
    """Repair anomalous flow readings (column 0) in a feature matrix.

    Consecutive abnormal indices are merged into runs. Each run is replaced
    by a linear ramp between the nearest normal values on either side; runs
    touching the start/end of the series are filled with the single available
    neighbour, and a fully-abnormal series is left untouched.

    :param features_: raw feature array of shape (n_samples, n_features)
    :param abnormal_indices: indices of the abnormal samples
    :return: (corrected copy of features_, mask with 0.0 at abnormal rows)
    """
    corrected = features_.copy()
    n = len(features_)
    mask = np.ones(n)

    # Zero out the mask at every flagged position.
    for pos in sorted(abnormal_indices):
        mask[pos] = 0

    # Merge sorted indices into inclusive [start, end] runs.
    runs = []
    for pos in sorted(abnormal_indices):
        if runs and pos == runs[-1][1] + 1:
            runs[-1][1] = pos
        else:
            runs.append([pos, pos])

    for start, end in runs:
        # Nearest normal value before the run (flow lives in column 0).
        left = next((features_[j, 0] for j in range(start - 1, -1, -1)
                     if mask[j] == 1), None)
        # Nearest normal value after the run.
        right = next((features_[j, 0] for j in range(end + 1, n)
                      if mask[j] == 1), None)

        if left is None and right is None:
            # Every sample is abnormal — nothing reliable to interpolate from.
            continue
        if left is None:
            # Run sits at the head of the series: extend the right neighbour.
            corrected[start:end + 1, 0] = right
        elif right is None:
            # Run sits at the tail of the series: extend the left neighbour.
            corrected[start:end + 1, 0] = left
        else:
            # Linear interpolation strictly between the two neighbours.
            span = end - start + 2  # run length + 1 interior steps
            for offset, row in enumerate(range(start, end + 1), start=1):
                weight = offset / span
                corrected[row, 0] = (1 - weight) * left + weight * right

    return corrected, mask


def _inverse_transform_flow(values, scaler, features_num):
    """Inverse-transform a (n_windows, horizon) array of scaled flow values.

    The scaler was fitted on the full feature matrix, so each window of flow
    values is embedded into column 0 of a zero-padded matrix before inversion;
    only column 0 of the result is kept (the padding columns are discarded).
    """
    padded = np.zeros((values.shape[0], values.shape[1], features_num))
    padded[:, :, 0] = values
    return np.array([scaler.inverse_transform(window)[:, 0] for window in padded])


def trainModel(df, modelPath, param, holidays, stationInfo, scalerPath, abnormal_dates=None):
    """Train an LSTM flow-forecasting model and print test-set metrics.

    :param df: DataFrame with at least 'date' and 'flow' columns
    :param modelPath: file path where the best (lowest val-loss) weights are saved
    :param param: dict of hyper-parameters: train_ratio, val_ratio, test_ratio,
                  batch_size, input_length, output_length, learning_rate,
                  num_blocks, dim, epochs
    :param holidays: DataFrame with a holiday calendar ('date', 'is_holiday', ...)
    :param stationInfo: optional DataFrame with per-date station status, or None
    :param scalerPath: file path where the fitted MinMaxScaler is dumped
    :param abnormal_dates: optional iterable of dates whose flow values are
                           treated as anomalies and repaired before training
    """
    # --- Run configuration ---
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    train_ratio = param["train_ratio"]
    val_ratio = param["val_ratio"]
    test_ratio = param["test_ratio"]  # implied remainder; kept for documentation
    batch_size = param["batch_size"]
    input_length = param["input_length"]
    output_length = param["output_length"]
    loss_function = 'MSE'
    learning_rate = param["learning_rate"]
    weight_decay = 0.01
    num_blocks = param["num_blocks"]
    dim = param["dim"]
    use_scaler = True            # min-max scale the feature matrix
    scaler_contains_labels = True  # labels are read back from scaled column 0
    target_value = 'flow'

    # --- Preprocessing: sort chronologically and derive calendar features ---
    df['date'] = pd.to_datetime(df['date'])
    df = df.sort_values('date').reset_index(drop=True)
    df['day_of_week'] = df['date'].dt.dayofweek
    df['day_of_month'] = df['date'].dt.day
    df['month'] = df['date'].dt.month

    # Merge the holiday calendar (left join keeps every flow row).
    holidays['date'] = pd.to_datetime(holidays['date'])
    holiday_cols = ['date', 'is_holiday', 'holiday_type', 'holiday_num', 'serial_num', 'is_free']
    df = df.merge(holidays[holiday_cols], on='date', how='left')

    # Merge optional station information.
    if stationInfo is not None and len(df) > 1:
        stationInfo['date'] = pd.to_datetime(stationInfo['date'])
        station_cols = ['date', 'station_status', 'station_hour']
        df = df.merge(stationInfo[station_cols], on='date', how='left')

    # Bugfix: when stationInfo is None (or too short) the station columns are
    # never created and the fillna calls below raised KeyError.
    for col in ('station_status', 'station_hour'):
        if col not in df.columns:
            df[col] = 0

    # Fill the gaps left by the left-joins (non-holiday / no station record).
    fill_zero_cols = ['is_holiday', 'holiday_type', 'holiday_num',
                      'serial_num', 'is_free', 'station_status', 'station_hour']
    df[fill_zero_cols] = df[fill_zero_cols].fillna(0)

    # --- Feature / label extraction ---
    features_num = 11
    feature_cols = ['flow', 'day_of_week', 'day_of_month', 'month', 'is_holiday',
                    'holiday_type', 'holiday_num', 'serial_num', 'is_free',
                    'station_status', 'station_hour']
    features_ = df[feature_cols].values
    labels_ = df[target_value].values

    # Repair flagged anomalies in the flow column and build a validity mask.
    if abnormal_dates is not None:
        abnormal_indices = df[df['date'].isin(abnormal_dates)].index.tolist()
        features_, mask = correct_abnormal_values(features_, abnormal_indices)
    else:
        mask = np.ones(len(features_))

    # --- Scaling: fit on the training slice only, to avoid leakage ---
    scaler = None
    if use_scaler:
        split = int(len(features_) * train_ratio)
        train_part = features_[:split]
        rest_part = features_[split:]
        scaler = preprocessing.MinMaxScaler()
        train_part = scaler.fit_transform(train_part)
        # Bugfix: guard against a bare filename (empty dirname) before makedirs.
        scaler_dir = os.path.dirname(scalerPath)
        if scaler_dir and not os.path.exists(scaler_dir):
            os.makedirs(scaler_dir)
        dump(scaler, scalerPath)  # persisted for inference-time inverse transform
        rest_part = scaler.transform(rest_part)
        features_ = np.vstack([train_part, rest_part])
        if scaler_contains_labels:
            labels_ = features_[:, 0]  # flow column, now scaled

    # --- Sliding windows over the series ---
    features, labels = get_rolling_window_multistep(output_length, 0, input_length,
                                                    features_.T, np.expand_dims(labels_, 0))
    features = features.permute(0, 2, 1)  # -> (window, time, feature)

    # Valid (non-anomalous) step count per window; consumed by sequence packing.
    lengths = [int(mask[i:i + input_length].sum()) for i in range(features.size(0))]

    labels = torch.squeeze(labels, dim=1)
    features = features.to(torch.float32)
    labels = labels.to(torch.float32)
    lengths = torch.tensor(lengths, dtype=torch.long)

    # --- Chronological train / val / test split ---
    split_train_val = int(len(features) * train_ratio)
    split_val_test = int(len(features) * (train_ratio + val_ratio))

    train_features = features[:split_train_val]
    train_labels = labels[:split_train_val]
    train_lengths = lengths[:split_train_val]

    val_features = features[split_train_val:split_val_test]
    val_labels = labels[split_train_val:split_val_test]
    val_lengths = lengths[split_train_val:split_val_test]

    test_features = features[split_val_test:]
    test_labels = labels[split_val_test:]
    test_lengths = lengths[split_val_test:]

    # --- Data pipelines ---
    train_Datasets = TensorDataset(train_features.to(device), train_labels.to(device),
                                   train_lengths.to(device))
    train_Loader = DataLoader(batch_size=batch_size, dataset=train_Datasets, shuffle=True)

    val_Datasets = TensorDataset(val_features.to(device), val_labels.to(device),
                                 val_lengths.to(device))
    val_Loader = DataLoader(batch_size=batch_size, dataset=val_Datasets)

    test_Datasets = TensorDataset(test_features.to(device), test_labels.to(device),
                                  test_lengths.to(device))
    test_Loader = DataLoader(batch_size=batch_size, dataset=test_Datasets)

    # --- Model, loss, optimiser, scheduler ---
    LSTMMain_model = LSTMMainWithPadding(input_size=features_num, output_len=output_length,
                                         lstm_hidden=dim, lstm_layers=num_blocks,
                                         batch_size=batch_size, device=device, dropout_rate=0.2)
    LSTMMain_model.to(device)

    if loss_function == 'MSE':
        loss_func = nn.MSELoss(reduction='mean')
    else:
        # Bugfix: previously an unknown value left loss_func undefined (NameError).
        raise ValueError(f"Unsupported loss function: {loss_function}")

    epochs = param["epochs"]
    optimizer = torch.optim.AdamW(LSTMMain_model.parameters(), lr=learning_rate,
                                  weight_decay=weight_decay)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs // 3, eta_min=0.00001)

    # --- Training loop with best-on-validation checkpointing ---
    print("——————————————————————Training Starts——————————————————————")
    best_val_loss = float('inf')
    for epoch in range(epochs):
        LSTMMain_model.train()
        train_loss_sum = 0
        for feature_, label_, lengths_ in train_Loader:
            optimizer.zero_grad()
            prediction = LSTMMain_model(feature_, lengths_)
            loss = loss_func(prediction, label_)
            loss.backward()
            # Bugfix: clip_grad_norm (no underscore) is deprecated and removed
            # in current PyTorch; the in-place variant is the supported API.
            torch.nn.utils.clip_grad_norm_(LSTMMain_model.parameters(), 0.15)
            optimizer.step()
            train_loss_sum += loss.item()
        # Bugfix: the scheduler was created but never stepped, so the cosine
        # annealing schedule silently had no effect.
        scheduler.step()

        LSTMMain_model.eval()
        val_loss_sum = 0
        with torch.no_grad():
            for feature_, label_, lengths_ in val_Loader:
                prediction = LSTMMain_model(feature_, lengths_)
                val_loss_sum += loss_func(prediction, label_).item()

        print(f"Epoch {epoch + 1}/{epochs}")
        print(f'Train Loss: {train_loss_sum / len(train_Loader):.4f}')
        print(f'Val Loss: {val_loss_sum / len(val_Loader):.4f}')

        if val_loss_sum < best_val_loss:
            best_val_loss = val_loss_sum
            torch.save(LSTMMain_model.state_dict(), modelPath)
            print("Best model saved!")

    print(f"Best Val Loss: {best_val_loss / len(val_Loader):.4f}")
    print("——————————————————————Training Ends——————————————————————")

    # --- Test evaluation with the best checkpoint ---
    # map_location keeps the load robust if the checkpoint device differs.
    LSTMMain_model.load_state_dict(torch.load(modelPath, map_location=device))
    LSTMMain_model.eval()

    test_loss_sum = 0
    all_predictions = []
    all_labels = []
    with torch.no_grad():
        for feature_, label_, lengths_ in test_Loader:
            prediction = LSTMMain_model(feature_, lengths_)
            test_loss_sum += loss_func(prediction, label_).item()
            all_predictions.append(prediction.cpu().numpy())
            all_labels.append(label_.cpu().numpy())

    pre_array = np.concatenate(all_predictions)
    test_labels = np.concatenate(all_labels)

    # Undo min-max scaling on the flow column so metrics are in real units.
    if scaler_contains_labels and scaler is not None:
        pre_array = _inverse_transform_flow(pre_array, scaler, features_num)
        test_labels = _inverse_transform_flow(test_labels, scaler, features_num)

    MSE_l = mean_squared_error(test_labels, pre_array)
    MAE_l = mean_absolute_error(test_labels, pre_array)
    MAPE_l = mean_absolute_percentage_error(test_labels, pre_array)
    R2 = r2_score(test_labels, pre_array)

    print('Test Metrics:')
    print(f'MSE: {MSE_l:.4f}')
    print(f'MAE: {MAE_l:.4f}')
    print(f'MAPE: {MAPE_l:.4%}')
    print(f'R2: {R2:.4f}')