# 导入必要的库
import argparse
import csv
import os
import time
import pandas as pd
import dill
import numpy as np
import torch
from matplotlib import pyplot as plt
import seaborn as sns
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from tqdm import tqdm
import warnings
import math
import sys

warnings.filterwarnings('ignore')


# Seed every RNG source for reproducible runs.
def same_seeds(seed):
    """Seed Python, NumPy and PyTorch (CPU + all CUDA devices) RNGs.

    Args:
        seed: integer seed applied to every random-number source.
    """
    import random  # local import: 'random' is not imported at module level
    random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    # Bug fix: the original set cudnn.benchmark = True, which lets cuDNN pick
    # convolution algorithms non-deterministically and defeats the seeding.
    # For reproducibility, force deterministic algorithm selection instead.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


# Early stopping on validation loss.
class EarlyStopping:
    """Stop training after `patience` epochs without validation improvement.

    The best model seen so far is checkpointed to `save_path` (whole pickled
    module via dill). An improvement means the negated validation loss rises
    by more than `delta` above the best score recorded.
    """

    def __init__(self, save_path, patience=7, verbose=False, delta=0):
        self.save_path = save_path          # checkpoint destination
        self.patience = patience            # epochs tolerated without improvement
        self.verbose = verbose              # print a message on each save
        self.counter = 0                    # consecutive non-improving epochs
        self.best_score = None              # best (negated) validation loss
        self.early_stop = False             # flag polled by the training loop
        self.val_loss_min = np.inf          # lowest validation loss seen
        self.delta = delta                  # minimum improvement to reset counter

    def __call__(self, val_loss, model):
        """Record one epoch's validation loss; checkpoint or count toward stop."""
        candidate = -val_loss
        improved = self.best_score is None or candidate >= self.best_score + self.delta
        if improved:
            self.best_score = candidate
            self.save_checkpoint(val_loss, model)
            self.counter = 0
        else:
            self.counter += 1
            print(f"EarlyStopping counter: {self.counter} out of {self.patience}")
            if self.counter >= self.patience:
                self.early_stop = True

    def save_checkpoint(self, val_loss, model):
        """Persist the full model and remember the new minimum loss."""
        if self.verbose:
            print(f"Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...")
        torch.save(model, self.save_path, pickle_module=dill)
        self.val_loss_min = val_loss


# Sliding-window dataset over a pre-scaled feature matrix.
class data_detime(Dataset):
    """Yield (lookback window, future target) pairs from a 2-D array.

    Column 0 is the prediction target. With `multi_steps` the label covers the
    next `lookforward_length` target values; otherwise only the final one.
    """

    def __init__(self, data, lookback_length, lookforward_length, multi_steps=False):
        self.seq_len = lookback_length      # length of the input window
        self.pred_len = lookforward_length  # forecast horizon
        self.multi_steps = multi_steps      # multi-step vs single-step label
        self.data_y = data                  # label source (same array as features)
        self.data = data
        print(self.data.shape)

    def __getitem__(self, index):
        window_end = index + self.seq_len
        features = self.data[index:window_end]
        if self.multi_steps:
            # All pred_len future target values.
            target = self.data_y[window_end:window_end + self.pred_len, 0]
        else:
            # Only the value pred_len steps ahead (kept 2-D via slicing).
            horizon = window_end + self.pred_len
            target = self.data_y[horizon - 1:horizon, 0]
        return features, target

    def __len__(self):
        # Number of complete (window, horizon) pairs that fit in the series.
        return len(self.data) - self.seq_len - self.pred_len + 1


# Data splitting and preprocessing.
def split_data_cnn(data, train, test, lookback_length):
    """Split a raw dataframe into standardized train/valid/test arrays.

    Args:
        data: DataFrame with a 'Date'/'date' column plus numeric columns; the
            first non-date column is the prediction target.
        train: fraction of rows used for training (e.g. 0.8).
        test: fraction of rows used for testing (e.g. 0.1); the remainder
            becomes the validation split.
        lookback_length: input window length; valid/test splits are extended
            backwards by this many rows so their first windows are complete.

    Returns:
        Tuple of (data_train, data_valid, data_test, timestamp_train,
        timestamp_valid, timestamp_test, scalar_y), where scalar_y is a
        StandardScaler fitted on the training target column only.
    """
    # Bug fix: interpolate() returns a new Series; the original discarded the
    # result, so NaNs were never actually filled.
    for column in list(data.columns[data.isnull().sum() > 0]):
        data[column] = data[column].interpolate(method='linear', limit_direction='forward')

    date_column = 'Date' if 'Date' in data.columns else 'date'
    # .copy() so the added 'date' column does not write into a view of `data`
    # (avoids SettingWithCopyWarning and accidental aliasing).
    timestamp = data[[date_column]].copy()
    timestamp['date'] = pd.to_datetime(timestamp[date_column])

    cols = list(data.columns)
    cols.remove(date_column)
    data = data[cols].values
    # PV output cannot be negative; clamp the target column at zero.
    data[:, 0] = np.maximum(data[:, 0], 0)

    length = len(data)
    num_train = int(length * train)
    num_test = int(length * test)
    num_valid = length - num_test - num_train

    timestamp_train = timestamp[0:num_train]
    timestamp_valid = timestamp[num_train - lookback_length:num_train + num_valid]
    timestamp_test = timestamp[num_train + num_valid - lookback_length:]

    # Fit scalers on the training portion only, to avoid test-set leakage.
    scalar = StandardScaler()
    scalar_y = StandardScaler()
    y = data[0:num_train, 0].reshape(-1, 1)
    scalar_y.fit(y)
    scalar.fit(data[0:num_train])
    data = scalar.transform(data)

    data_train = data[0:num_train]
    data_valid = data[num_train - lookback_length:num_train + num_valid]
    data_test = data[num_train + num_valid - lookback_length:length]

    return data_train, data_valid, data_test, timestamp_train, timestamp_valid, timestamp_test, scalar_y


# Evaluation metrics for PV forecasts.
def metrics_of_pv(preds, trues):
    """Return [MAE, MSE, R2, MBE], each rounded to 4 decimals.

    Args:
        preds: predicted values (any array-like; flattened before scoring).
        trues: ground-truth values of matching size.
    """
    predicted = np.array(preds).flatten()
    observed = np.array(trues).flatten()
    return [
        np.round(mean_absolute_error(observed, predicted), 4),
        np.round(mean_squared_error(observed, predicted), 4),
        np.round(r2_score(observed, predicted), 4),
        # Mean bias error: positive means systematic over-prediction.
        np.round(np.mean(predicted - observed), 4),
    ]


# One optimization pass over the training loader.
def train(data, model, criterion, optm, device):
    """Run a single training epoch and return the average loss.

    Args:
        data: DataLoader yielding (x, y) batches.
        model: network to optimize; switched to train mode here.
        criterion: loss function (sum-reduced at the call sites).
        optm: optimizer stepping the model parameters.
        device: torch device the batches are moved to.

    Returns:
        Epoch loss: sum of per-batch `loss * batch_size` divided by the
        dataset length.
    """
    model.train()
    accumulated = 0.0
    for batch_x, batch_y in tqdm(data):
        model.zero_grad()
        batch_x = batch_x.float().to(device)
        batch_y = batch_y.float().to(device)
        optm.zero_grad()
        prediction = model(batch_x)
        batch_loss = criterion(prediction, batch_y)
        batch_loss.backward()
        optm.step()
        accumulated += batch_loss.item() * batch_x.size(0)
    return accumulated / len(data.dataset)


# Validation/test pass: loss, metrics, and collected predictions.
def evaluate(data, model, criterion, device, scalar=None):
    """Score `model` on a loader without gradient updates.

    Args:
        data: DataLoader yielding (x, y) batches.
        model: network to evaluate; switched to eval mode here.
        criterion: loss function (sum-reduced at the call sites).
        device: torch device the batches are moved to.
        scalar: optional fitted scaler; when given, predictions and labels are
            inverse-transformed back to original units before scoring.

    Returns:
        (epoch_loss, [MAE, MSE, R2, MBE], labels, predictions)
    """
    model.eval()
    accumulated = 0.0
    pred_batches, label_batches = [], []
    for batch_x, batch_y in tqdm(data):
        model.zero_grad()
        with torch.no_grad():
            batch_x = batch_x.float().to(device)
            batch_y = batch_y.float().to(device)
            prediction = model(batch_x)
            accumulated += criterion(prediction, batch_y).item() * batch_x.size(0)
            pred_batches.append(prediction.cpu().numpy())
            label_batches.append(batch_y.cpu().numpy())
    epoch_loss = accumulated / len(data.dataset)

    all_preds = np.concatenate(pred_batches, axis=0)
    all_labels = np.concatenate(label_batches, axis=0)

    # Report metrics in original units when a scaler is supplied; note the
    # loss above is still computed in scaled space.
    if scalar is not None:
        all_preds = scalar.inverse_transform(all_preds)
        all_labels = scalar.inverse_transform(all_labels)

    return epoch_loss, metrics_of_pv(all_preds, all_labels), all_labels, all_preds


# KAN linear layer
class KANLinear(torch.nn.Module):
    """Kolmogorov-Arnold (KAN) linear layer.

    Every input->output connection carries a learnable univariate function,
    parameterized as a B-spline of order `spline_order` over `grid_size`
    intervals, added to a residual "base" branch
    `F.linear(base_activation(x), base_weight)`.
    Structure matches the public "efficient-kan" style implementation.
    """

    def __init__(
            self,
            in_features,
            out_features,
            grid_size=5,
            spline_order=3,
            scale_noise=0.1,
            scale_base=1.0,
            scale_spline=1.0,
            enable_standalone_scale_spline=True,
            base_activation=torch.nn.SiLU,
            grid_eps=0.02,
            grid_range=[-1, 1],
    ):
        super(KANLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.grid_size = grid_size
        self.spline_order = spline_order

        # Uniform knot vector over grid_range, extended by `spline_order`
        # extra knots on each side so the basis is defined at the boundaries;
        # shape (in_features, grid_size + 2*spline_order + 1).
        h = (grid_range[1] - grid_range[0]) / grid_size
        grid = (
            (
                    torch.arange(-spline_order, grid_size + spline_order + 1) * h
                    + grid_range[0]
            )
            .expand(in_features, -1)
            .contiguous()
        )
        # Buffer, not a Parameter: the grid is moved with the module but is
        # only updated explicitly via update_grid().
        self.register_buffer("grid", grid)

        # Residual linear path weight and per-connection spline coefficients.
        self.base_weight = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.spline_weight = torch.nn.Parameter(
            torch.Tensor(out_features, in_features, grid_size + spline_order)
        )
        if enable_standalone_scale_spline:
            # Optional learnable per-connection scale applied to spline_weight.
            self.spline_scaler = torch.nn.Parameter(
                torch.Tensor(out_features, in_features)
            )

        self.scale_noise = scale_noise
        self.scale_base = scale_base
        self.scale_spline = scale_spline
        self.enable_standalone_scale_spline = enable_standalone_scale_spline
        self.base_activation = base_activation()
        self.grid_eps = grid_eps

        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-init the base weight; fit spline weights to small noise."""
        torch.nn.init.kaiming_uniform_(self.base_weight, a=math.sqrt(5) * self.scale_base)
        with torch.no_grad():
            # Small random curve values sampled at the grid_size+1 interior
            # grid points, shape (grid_size+1, in_features, out_features).
            noise = (
                    (
                            torch.rand(self.grid_size + 1, self.in_features, self.out_features)
                            - 1 / 2
                    )
                    * self.scale_noise
                    / self.grid_size
            )
            # Convert the sampled curve values into spline coefficients; the
            # fixed scale_spline factor is folded in only when no standalone
            # learnable scaler exists.
            self.spline_weight.data.copy_(
                (self.scale_spline if not self.enable_standalone_scale_spline else 1.0)
                * self.curve2coeff(
                    self.grid.T[self.spline_order: -self.spline_order],
                    noise,
                )
            )
            if self.enable_standalone_scale_spline:
                torch.nn.init.kaiming_uniform_(self.spline_scaler, a=math.sqrt(5) * self.scale_spline)

    def b_splines(self, x: torch.Tensor):
        """Evaluate the B-spline basis functions at `x`.

        Args:
            x: (batch, in_features) tensor.

        Returns:
            (batch, in_features, grid_size + spline_order) basis values.
        """
        assert x.dim() == 2 and x.size(1) == self.in_features
        grid: torch.Tensor = self.grid
        x = x.unsqueeze(-1)
        # Order-0 basis: indicator of the knot interval containing x.
        bases = ((x >= grid[:, :-1]) & (x < grid[:, 1:])).to(x.dtype)
        # Cox-de Boor recursion, raising the order one step per iteration.
        for k in range(1, self.spline_order + 1):
            bases = (
                            (x - grid[:, : -(k + 1)])
                            / (grid[:, k:-1] - grid[:, : -(k + 1)])
                            * bases[:, :, :-1]
                    ) + (
                            (grid[:, k + 1:] - x)
                            / (grid[:, k + 1:] - grid[:, 1:(-k)])
                            * bases[:, :, 1:]
                    )
        assert bases.size() == (
            x.size(0),
            self.in_features,
            self.grid_size + self.spline_order,
        )
        return bases.contiguous()

    def curve2coeff(self, x: torch.Tensor, y: torch.Tensor):
        """Least-squares fit of spline coefficients interpolating (x, y).

        Args:
            x: (n_points, in_features) sample locations.
            y: (n_points, in_features, out_features) target curve values.

        Returns:
            (out_features, in_features, grid_size + spline_order) coefficients.
        """
        assert x.dim() == 2 and x.size(1) == self.in_features
        assert y.size() == (x.size(0), self.in_features, self.out_features)
        # Solve per-input-feature least squares: A @ coeff = B.
        A = self.b_splines(x).transpose(0, 1)
        B = y.transpose(0, 1)
        solution = torch.linalg.lstsq(A, B).solution
        result = solution.permute(2, 0, 1)
        assert result.size() == (
            self.out_features,
            self.in_features,
            self.grid_size + self.spline_order,
        )
        return result.contiguous()

    @property
    def scaled_spline_weight(self):
        """Spline weights with the standalone per-connection scaler applied (if enabled)."""
        return self.spline_weight * (
            self.spline_scaler.unsqueeze(-1)
            if self.enable_standalone_scale_spline
            else 1.0
        )

    def forward(self, x: torch.Tensor):
        """Apply base + spline paths; leading dimensions are preserved."""
        assert x.size(-1) == self.in_features
        original_shape = x.shape
        # Flatten all leading dims so both paths see a 2-D batch.
        x = x.reshape(-1, self.in_features)
        base_output = F.linear(self.base_activation(x), self.base_weight)
        spline_output = F.linear(
            self.b_splines(x).view(x.size(0), -1),
            self.scaled_spline_weight.view(self.out_features, -1),
        )
        output = base_output + spline_output
        output = output.reshape(*original_shape[:-1], self.out_features)
        return output

    @torch.no_grad()
    def update_grid(self, x: torch.Tensor, margin=0.01):
        """Re-fit the knot grid to the empirical distribution of `x`.

        Blends a quantile-based ("adaptive") grid with a uniform grid via
        `grid_eps`, then refits spline coefficients so the layer's current
        outputs are preserved on the new grid.
        """
        assert x.dim() == 2 and x.size(1) == self.in_features
        batch = x.size(0)
        # Current spline outputs per sample, before the grid moves.
        splines = self.b_splines(x)
        splines = splines.permute(1, 0, 2)
        orig_coeff = self.scaled_spline_weight
        orig_coeff = orig_coeff.permute(1, 2, 0)
        unreduced_spline_output = torch.bmm(splines, orig_coeff)
        unreduced_spline_output = unreduced_spline_output.permute(1, 0, 2)
        # Adaptive grid: evenly spaced quantiles of the sorted inputs.
        x_sorted = torch.sort(x, dim=0)[0]
        grid_adaptive = x_sorted[
            torch.linspace(
                0, batch - 1, self.grid_size + 1, dtype=torch.int64, device=x.device
            )
        ]
        # Uniform grid spanning the observed range plus a small margin.
        uniform_step = (x_sorted[-1] - x_sorted[0] + 2 * margin) / self.grid_size
        grid_uniform = (
                torch.arange(
                    self.grid_size + 1, dtype=torch.float32, device=x.device
                ).unsqueeze(1)
                * uniform_step
                + x_sorted[0]
                - margin
        )
        grid = self.grid_eps * grid_uniform + (1 - self.grid_eps) * grid_adaptive
        # Extend by spline_order extrapolated knots on each side.
        grid = torch.concatenate(
            [
                grid[:1]
                - uniform_step
                * torch.arange(self.spline_order, 0, -1, device=x.device).unsqueeze(1),
                grid,
                grid[-1:]
                + uniform_step
                * torch.arange(1, self.spline_order + 1, device=x.device).unsqueeze(1),
            ],
            dim=0,
        )
        self.grid.copy_(grid.T)
        # Refit coefficients so outputs on `x` match the pre-update outputs.
        self.spline_weight.data.copy_(self.curve2coeff(x, unreduced_spline_output))


# KAN network: a stack of KANLinear layers.
class KAN(torch.nn.Module):
    """Sequentially composed KANLinear layers.

    `layers_hidden` lists the layer widths, e.g. [in, hidden, out]; one
    KANLinear is created per consecutive pair.
    """

    def __init__(
            self,
            layers_hidden,
            grid_size=5,
            spline_order=3,
            scale_noise=0.1,
            scale_base=1.0,
            scale_spline=1.0,
            base_activation=torch.nn.SiLU,
            grid_eps=0.02,
            grid_range=[-1, 1],
    ):
        super(KAN, self).__init__()
        self.grid_size = grid_size
        self.spline_order = spline_order

        # Shared keyword arguments for every layer.
        layer_kwargs = dict(
            grid_size=grid_size,
            spline_order=spline_order,
            scale_noise=scale_noise,
            scale_base=scale_base,
            scale_spline=scale_spline,
            base_activation=base_activation,
            grid_eps=grid_eps,
            grid_range=grid_range,
        )
        self.layers = torch.nn.ModuleList(
            KANLinear(width_in, width_out, **layer_kwargs)
            for width_in, width_out in zip(layers_hidden, layers_hidden[1:])
        )

    def forward(self, x: torch.Tensor, update_grid=False):
        """Run the stack; optionally adapt each layer's grid to its input first."""
        for layer in self.layers:
            if update_grid:
                layer.update_grid(x)
            x = layer(x)
        return x


# Cross-attention: input1 supplies queries, input2 supplies keys/values.
class CrossAttention(nn.Module):
    """Single-head cross-attention fusing two feature streams.

    `input1` (last dim = `dim`) is projected to queries; `input2`
    (last dim = `lenth`) is projected to keys and values of width `dim`.
    """

    def __init__(self, dim, lenth):
        super(CrossAttention, self).__init__()
        # NOTE: layer creation order (query, key, value) is kept stable so
        # seeded initialization matches the original implementation.
        self.query_layer = nn.Linear(dim, dim)
        self.key_layer = nn.Linear(lenth, dim)
        self.value_layer = nn.Linear(lenth, dim)
        self.scale = dim ** 0.5  # softmax temperature: sqrt(dim)

    def forward(self, input1, input2):
        """Return input2's values attended by input1's queries."""
        queries = self.query_layer(input1)
        keys = self.key_layer(input2)
        values = self.value_layer(input2)
        scores = queries @ keys.transpose(-2, -1) / self.scale
        weights = torch.softmax(scores, dim=-1)
        return weights @ values


# Reversible instance normalization (RevIN).
class RevIN(nn.Module):
    """Normalize per instance and later invert the exact transformation.

    Call with mode='norm' to standardize (statistics are cached on the
    module), then mode='denorm' to map model outputs back to original scale.
    """

    def __init__(self, num_features: int, eps=1e-5, affine=True):
        super(RevIN, self).__init__()
        self.num_features = num_features  # size of the last (feature) axis
        self.eps = eps                    # numerical stability term
        self.affine = affine              # learnable rescale after normalizing
        if self.affine:
            self._init_params()

    def forward(self, x, mode: str):
        """Dispatch on mode: 'norm' caches stats and normalizes; 'denorm' inverts."""
        if mode == 'norm':
            self._get_statistics(x)
            return self._normalize(x)
        if mode == 'denorm':
            return self._denormalize(x)
        raise NotImplementedError

    def _init_params(self):
        # Identity-initialized affine parameters, one per feature.
        self.affine_weight = nn.Parameter(torch.ones(self.num_features))
        self.affine_bias = nn.Parameter(torch.zeros(self.num_features))

    def _get_statistics(self, x):
        # Reduce over every axis except batch (first) and features (last);
        # detach so statistics are constants w.r.t. the graph.
        reduce_dims = tuple(range(1, x.ndim - 1))
        self.mean = x.mean(dim=reduce_dims, keepdim=True).detach()
        self.stdev = (x.var(dim=reduce_dims, keepdim=True, unbiased=False) + self.eps).sqrt().detach()

    def _normalize(self, x):
        out = (x - self.mean) / self.stdev
        if self.affine:
            out = out * self.affine_weight + self.affine_bias
        return out

    def _denormalize(self, x):
        out = x
        if self.affine:
            # eps*eps guards against division by a zero affine weight.
            out = (out - self.affine_bias) / (self.affine_weight + self.eps * self.eps)
        return out * self.stdev + self.mean


# Multi-head self-attention module.
class Attention(nn.Module):
    """Multi-head scaled-dot-product self-attention.

    Bug fix: the original forward() projected x to qkv but then returned
    `to_out(qkv[:, :, :dim_inner])` — i.e. only the raw query slice, with no
    attention computed at all (`scale` and `flash` were unused). This version
    performs real attention while keeping the constructor signature and the
    submodule/state-dict layout (`to_qkv.0`, `to_out.1`) unchanged.

    Args:
        dim: model (input/output) dimension.
        dim_head: per-head dimension.
        heads: number of attention heads.
        dropout: dropout on the output projection.
        flash: kept for interface compatibility; manual attention is used.
    """

    def __init__(
            self,
            dim,
            dim_head=32,
            heads=4,
            dropout=0.,
            flash=True
    ):
        super().__init__()
        self.heads = heads
        self.scale = dim_head ** -0.5
        self.dim_inner = dim_head * heads
        self.flash = flash  # retained so callers passing flash= still work

        self.to_qkv = nn.Sequential(
            nn.Linear(dim, self.dim_inner * 3, bias=False),
            nn.Identity()
        )

        self.to_out = nn.Sequential(
            nn.Identity(),
            nn.Linear(self.dim_inner, dim, bias=False),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        """Attend over the sequence axis of x: (batch, seq, dim) -> same shape."""
        b, n, _ = x.shape
        h = self.heads
        q, k, v = self.to_qkv(x).chunk(3, dim=-1)
        # (b, n, h*d) -> (b, h, n, d) for per-head attention.
        q = q.view(b, n, h, -1).transpose(1, 2)
        k = k.view(b, n, h, -1).transpose(1, 2)
        v = v.view(b, n, h, -1).transpose(1, 2)
        scores = torch.matmul(q, k.transpose(-2, -1)) * self.scale
        attn = scores.softmax(dim=-1)
        out = torch.matmul(attn, v)
        # Merge heads back: (b, h, n, d) -> (b, n, h*d).
        out = out.transpose(1, 2).reshape(b, n, self.dim_inner)
        return self.to_out(out)


# Position-wise feed-forward block.
class FeedForward(nn.Module):
    """MLP applied to the last axis: Linear -> GELU -> Dropout -> Linear.

    The hidden width is `dim * mult`; input and output widths are `dim`.
    """

    def __init__(self, dim, mult=4, dropout=0.):
        super().__init__()
        hidden = int(dim * mult)
        self.net = nn.Sequential(
            nn.Linear(dim, hidden),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden, dim),
        )

    def forward(self, x):
        """Shape-preserving transform over the feature axis."""
        return self.net(x)


# iTransformer_LSTM model.
class iTransformer_LSTM(nn.Module):
    """Hybrid forecaster: inverted-transformer branch for the target series,
    LSTM branch for the covariates, fused by cross-attention into a KAN head.

    Input is (batch, length_input, input_size); column 0 of the feature axis
    is the target series, the remaining columns are covariates. Output is
    (batch, length_pre).
    """

    def __init__(self, input_size=5, length_input=24, dim_embed=32, dim_lstm=32,
                 depth=3, heads=12, depth_lstm=3, length_pre=1):
        super(iTransformer_LSTM, self).__init__()
        # NOTE: submodule creation order is kept identical to the original so
        # seeded parameter initialization is reproduced exactly.
        self.model1 = nn.ModuleList([])
        for _ in range(depth):
            self.model1.append(nn.ModuleList([
                Attention(dim_embed, heads=heads),
                nn.LayerNorm(dim_embed),
                FeedForward(dim_embed),
                nn.LayerNorm(dim_embed),
            ]))

        # Covariate branch: input_size - 1 channels (target column excluded).
        self.lstm = nn.LSTM(input_size=input_size - 1,
                            hidden_size=dim_lstm,
                            num_layers=depth_lstm,
                            batch_first=True,
                            bidirectional=False)

        # Embed the whole target history (length_input) as one token.
        self.mlp_in = nn.Sequential(
            nn.Linear(length_input, dim_embed),
            nn.LayerNorm(dim_embed)
        )

        self.cross = CrossAttention(dim=dim_embed, lenth=dim_lstm)
        self.k_mpl = KAN([dim_embed, length_pre])

    def forward(self, x):
        """Forecast from (batch, length_input, input_size) to (batch, length_pre)."""
        target_history = x[:, :, 0]
        covariates = x[:, :, 1:]
        # Inverted embedding: the time axis becomes the feature axis.
        tokens = self.mlp_in(target_history.unsqueeze(-1).transpose(1, 2))
        lstm_out, _ = self.lstm(covariates)
        # Pre-LN-free transformer blocks with residual connections.
        for attn, norm1, ff, norm2 in self.model1:
            tokens = norm1(attn(tokens) + tokens)
            tokens = norm2(ff(tokens) + tokens)
        fused = self.cross(tokens, lstm_out)
        # Single target token -> (batch, length_pre).
        return self.k_mpl(fused)[:, 0, :]


# Main script: load data, train iTransformer_LSTM, evaluate and plot results.
if __name__ == "__main__":
    # Fix all RNG seeds for reproducibility.
    seeds = 42
    same_seeds(seeds)

    site = '7-First-Solar'
    dataset = 'Autumn'

    # Default hyperparameters (may be overridden by CLI flags below).
    batch_size = 128
    learning_rate = 0.0001
    epochs = 150

    # Parse CLI args only when running as a plain script; Jupyter/Colab
    # kernels inject extra argv that argparse would reject.
    if not any(x in sys.argv[0] for x in ['ipykernel_launcher.py', 'colab_kernel_launcher.py']):
        parser = argparse.ArgumentParser(description="Hyperparameters")
        parser.add_argument("--batch_size", type=int, default=128)
        parser.add_argument("--learning_rate", type=float, default=0.0001)
        parser.add_argument("--epochs", type=int, default=150)
        args = parser.parse_args()
        batch_size = args.batch_size
        learning_rate = args.learning_rate
        epochs = args.epochs

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # file_path = f'solar.csv'
    file_path = f'new_station07.csv'

    time_length = 24 * 1   # lookback window: 24 points (presumably hourly — confirm with data)
    predict_length = 1     # single-step-ahead forecast
    multi_steps = False

    os.makedirs(f'model_save/{dataset}', exist_ok=True)
    os.makedirs(f'data_record/{dataset}', exist_ok=True)

    # Try reading the CSV file directly.
    try:
        df_all = pd.read_csv(file_path, header=0)
    except Exception as e:
        print(f"Error loading CSV file: {e}")
        exit(1)

    # 80% train / 10% test / remainder validation; `scalar` is the
    # target-only StandardScaler used later to invert predictions.
    data_train, data_valid, data_test, timestamp_train, timestamp_valid, timestamp_test, scalar = split_data_cnn(
        df_all, 0.8, 0.1, time_length)

    dataset_train = data_detime(data=data_train, lookback_length=time_length, multi_steps=multi_steps,
                                lookforward_length=predict_length)
    dataset_valid = data_detime(data=data_valid, lookback_length=time_length, multi_steps=multi_steps,
                                lookforward_length=predict_length)
    dataset_test = data_detime(data=data_test, lookback_length=time_length, multi_steps=multi_steps,
                               lookforward_length=predict_length)

    train_loader = DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
    valid_loader = DataLoader(dataset_valid, batch_size=batch_size, shuffle=False)
    test_loader = DataLoader(dataset_test, batch_size=batch_size, shuffle=False)

    # Model hyperparameters; input_size excludes the date column dropped in split_data_cnn.
    params_dict = {'hidden_dim': 32, 'layer_L': 3, 'layer_I': 4, 'heads': 12, 'dim_lstm': 32}
    model = iTransformer_LSTM(
        input_size=df_all.shape[1] - 1,
        length_input=time_length,
        dim_embed=params_dict['hidden_dim'],
        dim_lstm=params_dict['dim_lstm'],
        depth=params_dict['layer_I'],
        heads=params_dict['heads'],
        depth_lstm=params_dict['layer_L']
    ).to(device)

    # Sum-reduced losses; train()/evaluate() divide by the dataset size themselves.
    criterion_MAE = nn.L1Loss(reduction='sum').to(device)
    criterion_MSE = nn.MSELoss(reduction='sum').to(device)
    optm = optim.Adam(model.parameters(), lr=learning_rate)
    # Halve the LR after 5 epochs without validation improvement.
    # (verbose=True is not supported by the torch version used here.)
    optm_schedule = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optm, mode="min", factor=0.5, patience=5
    )

    model_name = f"iLK_{dataset}"
    model_save = f"model_save/{dataset}/{model_name}.pt"

    train_losses, valid_losses = [], []
    earlystopping = EarlyStopping(model_save, patience=10, delta=0.0001)

    need_train = True
    if need_train:
        # Main training loop; Ctrl-C skips ahead to evaluation.
        try:
            for epoch in range(epochs):
                time_start = time.time()
                train_loss = train(data=train_loader, model=model, criterion=criterion_MAE, optm=optm,
                                   device=device)
                valid_loss, ms, _, _ = evaluate(data=valid_loader, model=model, criterion=criterion_MAE,
                                                device=device)

                train_losses.append(train_loss)
                valid_losses.append(valid_loss)
                optm_schedule.step(valid_loss)
                # Checkpoints the best model and tracks the patience counter.
                earlystopping(valid_loss, model)

                print('')
                # NOTE(review): ms[1] is the MSE returned by metrics_of_pv but is
                # printed under the "RMSE" label — confirm which is intended.
                print(
                    f'{model_name}|time:{(time.time() - time_start):.2f}|Loss_train:{train_loss:.4f}|Learning_rate:{optm.state_dict()["param_groups"][0]["lr"]:.4f}\n'
                    f'Loss_valid:{valid_loss:.4f}|MAE:{ms[0]:.4f}|RMSE:{ms[1]:.4f}|R2:{ms[2]:.4f}|MBE:{ms[3]:.4f}',
                    flush=True)

                if earlystopping.early_stop:
                    print("Early stopping")
                    break
        except KeyboardInterrupt:
            print("Training interrupted by user")

        # Loss curves for the finished (or interrupted) run.
        plt.figure(figsize=(10, 6))
        plt.plot(np.arange(len(train_losses)), train_losses, label="train loss")
        plt.plot(np.arange(len(valid_losses)), valid_losses, label="valid loss")
        plt.legend()
        plt.xlabel("epochs")
        plt.title("Train_loss & Valid_loss")
        plt.savefig(f"data_record/{dataset}/{model_name}_loss.png")
        plt.show()

    # Reload the best checkpoint saved by EarlyStopping (full pickled model;
    # assumed to be a trusted local artifact — torch.load unpickles code).
    with open(model_save, "rb") as f:
        model = torch.load(f, map_location=device, pickle_module=dill)

    # Final test-set evaluation; `scalar` maps predictions back to original units.
    test_loss, ms_test, test_labels, test_preds = evaluate(data=test_loader, model=model, criterion=criterion_MAE,
                                                           device=device, scalar=scalar)

    print(
        f'Test_valid:{test_loss:.4f}|MAE:{ms_test[0]:.4f}|RMSE:{ms_test[1]:.4f}|R2:{ms_test[2]:.4f}|MBE:{ms_test[3]:.4f}')

    # Append the test metrics to the site's CSV record.
    with open(f'data_record/{dataset}/Metrics_{model_name}.csv', 'a', encoding='utf-8', newline='') as f:
        csv_write = csv.writer(f)
        csv_write.writerow([f'{site}_pred1_{model_name}', ms_test[0], ms_test[1], ms_test[2], ms_test[3]])

    # Time-series plot of true vs predicted values (using timestamps).
    plt.figure(figsize=(12, 6))
    # Skip the first `time_length` timestamps: they only seed the first window.
    time_axis = timestamp_test['date'].values[time_length:]
    time_axis = time_axis[:len(test_labels)]
    plt.plot(time_axis, test_labels[:, 0], label="True Values", color='blue')
    plt.plot(time_axis, test_preds[:, 0], label="Predicted Values", color='orange', linestyle='--')
    plt.legend()
    plt.xlabel("Time")
    plt.ylabel("Values")
    plt.title("Time Series: True vs Predicted Values")
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig(f"data_record/{dataset}/{model_name}_true_pred.png")
    plt.show()

    # Distribution of prediction errors.
    plt.figure(figsize=(10, 6))
    errors = test_preds[:, 0] - test_labels[:, 0]
    sns.histplot(errors, kde=True, color='purple')
    plt.axvline(x=0, color='red', linestyle='--', label='Zero Error')
    plt.axvline(x=np.mean(errors), color='green', linestyle='-', label='Mean Error')
    plt.legend()
    plt.xlabel("Prediction Error")
    plt.title("Error Distribution")
    plt.savefig(f"data_record/{dataset}/{model_name}_error_dist.png")
    plt.show()

    # Cumulative error plot.
    plt.figure(figsize=(10, 6))
    cum_errors = np.cumsum(errors)
    plt.plot(cum_errors, color='purple')
    plt.axhline(y=0, color='red', linestyle='--')
    plt.xlabel("Sample Index")
    plt.ylabel("Cumulative Error")
    plt.title("Cumulative Error Plot")
    plt.savefig(f"data_record/{dataset}/{model_name}_cum_error.png")
    plt.show()

    # One-day power curve (00:00 to 23:00, one tick per hour).
    plt.figure(figsize=(12, 6))
    points_per_day = 24  # one point per hour, 24 points per day

    # Find the first midnight (00:00) timestamp in the test window.
    time_axis = timestamp_test['date'][time_length:time_length + len(test_labels)]
    start_idx = None
    for i, dt in enumerate(time_axis):
        if dt.hour == 0 and dt.minute == 0:  # first 00:00 found
            start_idx = i
            break

    if start_idx is None:
        print("未找到00:00的时间点，使用测试集第一天数据")
        start_idx = 0

    # Clamp to the available data range.
    end_idx = start_idx + points_per_day
    if end_idx > len(test_labels):
        print(f"数据不足以覆盖完整一天（从{start_idx}到{end_idx}），使用可用数据")
        end_idx = len(test_labels)

    # Extract one day of true and predicted values.
    day_labels = test_labels[start_idx:end_idx, 0]
    day_preds = test_preds[start_idx:end_idx, 0]

    # Matching timestamps formatted as HH:MM labels.
    day_time_axis = time_axis[start_idx:end_idx]
    hours = [dt.strftime('%H:%M') for dt in day_time_axis]

    # Warn when fewer than 24 hourly points are available.
    if len(hours) < points_per_day:
        print(f"时间点不足24小时，仅有{len(hours)}个点")

    # Plot the daily curves.
    plt.plot(hours, day_labels, label="True Values", color='blue')
    plt.plot(hours, day_preds, label="Predicted Values", color='orange', linestyle='--')
    plt.legend()
    plt.xlabel("Time of Day")
    plt.ylabel("Power")
    plt.title("Daily Power Curve: True vs Predicted")
    # One x-tick per hour.
    plt.xticks(ticks=range(0, len(hours), 1), labels=[hours[i] for i in range(0, len(hours), 1)], rotation=45,
               fontsize=8)
    plt.tight_layout()
    plt.savefig(f"data_record/{dataset}/{model_name}_daily_power_curve.png")
    plt.show()