import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from matplotlib import image as mpimg
from torch.utils.data import DataLoader, Dataset
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import time
import os

# Work around a possible duplicate-OpenMP-runtime crash (common with PyTorch + MKL on Windows)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'


class EarlyStopping:
    """Stop training once validation loss stalls for `patience` epochs.

    Each improvement also persists the model weights to 'checkpoint.pt'
    in the current working directory.
    """

    def __init__(self, patience=5, delta=0, verbose=False):
        self.patience = patience      # epochs to tolerate without improvement
        self.delta = delta            # minimum change that counts as improvement
        self.verbose = verbose
        self.counter = 0              # epochs elapsed since the last improvement
        self.best_score = None        # best negated validation loss seen so far
        self.early_stop = False       # flag polled by the training loop
        self.val_loss_min = np.inf    # best raw validation loss seen so far

    def __call__(self, val_loss, model):
        score = -val_loss

        if self.best_score is None:
            # First epoch: establish the baseline and checkpoint immediately.
            self.best_score = score
            self.save_checkpoint(val_loss, model)
            return

        if score >= self.best_score + self.delta:
            # Improvement: record it, checkpoint, and reset the stall counter.
            self.best_score = score
            self.save_checkpoint(val_loss, model)
            self.counter = 0
        else:
            # No improvement this epoch.
            self.counter += 1
            if self.verbose:
                print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True

    def save_checkpoint(self, val_loss, model):
        """Persist the current model weights as the new best checkpoint."""
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model...')
        torch.save(model.state_dict(), 'checkpoint.pt')
        self.val_loss_min = val_loss


class TimeSeriesDataset(Dataset):
    """Dataset over pre-built (input window, target) pairs as float32 tensors."""

    def __init__(self, X, y):
        # Convert every sample up-front so __getitem__ is a plain list lookup.
        self.X = list(map(torch.FloatTensor, X))
        self.y = list(map(torch.FloatTensor, y))

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]


class FCNet(nn.Module):
    """Three-layer fully connected network with ReLU activations."""

    def __init__(self, input_size, hidden_size=64, output_size=1):
        super(FCNet, self).__init__()
        # input -> hidden -> hidden -> output, ReLU between linear layers
        layers = [
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)


def prepare_data(data, time_step, pred_step):
    """Slice a (channels, T) array into sliding windows and their targets.

    X[i] is data[:, i:i+time_step]; y[i] is the following pred_step
    columns with singleton axes squeezed away.
    """
    n_samples = data.shape[1] - time_step - pred_step + 1
    X = [data[:, start:start + time_step] for start in range(n_samples)]
    y = [data[:, start + time_step:start + time_step + pred_step].squeeze()
         for start in range(n_samples)]
    return X, y


def train_val_test_split(data, train_ratio=0.7, val_ratio=0.15):
    """Split a (channels, T) array chronologically into train/val/test.

    The test segment is whatever remains after the train and validation
    ratios are taken from the front; no shuffling is performed.
    """
    total_length = data.shape[1]
    train_end = int(total_length * train_ratio)
    val_end = train_end + int(total_length * val_ratio)
    return data[:, :train_end], data[:, train_end:val_end], data[:, val_end:]


def train_model(model, train_loader, val_loader, optimizer, criterion, epochs=10, clip_value=1.0, patience=5):
    """Train the model with gradient value clipping and early stopping.

    Args:
        model: network to train (updated in place).
        train_loader: DataLoader yielding (inputs, targets) training batches.
        val_loader: DataLoader yielding validation batches.
        optimizer: optimizer stepping the model's parameters.
        criterion: loss function; targets get a trailing dim (unsqueeze(-1))
            to match the model's (..., 1) output shape.
        epochs: maximum number of epochs.
        clip_value: each gradient element is clipped to [-clip_value, clip_value].
        patience: epochs without validation improvement before stopping.

    Returns:
        (model, train_losses, val_losses) — the model carries the best
        checkpoint weights (reloaded from 'checkpoint.pt', written by
        EarlyStopping in the current working directory), and the lists hold
        per-epoch average losses.
    """
    early_stopping = EarlyStopping(patience=patience, verbose=True)

    train_losses = []
    val_losses = []

    for epoch in range(epochs):
        # Training phase
        model.train()
        epoch_train_loss = 0.0
        for inputs, targets in train_loader:
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets.unsqueeze(-1))
            loss.backward()
            # Clip gradients element-wise before the optimizer step.
            torch.nn.utils.clip_grad_value_(model.parameters(), clip_value)
            optimizer.step()
            epoch_train_loss += loss.item()

        avg_train_loss = epoch_train_loss / len(train_loader)
        train_losses.append(avg_train_loss)

        # Validation phase (no gradients)
        model.eval()
        epoch_val_loss = 0.0
        with torch.no_grad():
            for inputs, targets in val_loader:
                outputs = model(inputs)
                loss = criterion(outputs, targets.unsqueeze(-1))
                epoch_val_loss += loss.item()

        avg_val_loss = epoch_val_loss / len(val_loader)
        val_losses.append(avg_val_loss)

        print(f'Epoch {epoch + 1}/{epochs}, Train Loss: {avg_train_loss:.4f}, Val Loss: {avg_val_loss:.4f}')

        # Early-stopping check; also checkpoints the model on improvement.
        early_stopping(avg_val_loss, model)
        if early_stopping.early_stop:
            print("Early stopping triggered")
            break

    # Restore the best weights saved by EarlyStopping.
    model.load_state_dict(torch.load('checkpoint.pt'))

    return model, train_losses, val_losses


def evaluate_model(model, X_test, y_test, scaler, FreqNum):
    """Predict over the test windows and compute per-frequency NRMSE.

    Args:
        model: trained network; called on one window at a time with a batch dim.
        X_test: list of (FreqNum, time_step) input windows.
        y_test: normalized ground truth of shape (FreqNum, n_samples).
        scaler: fitted MinMaxScaler (feature-last layout, hence the transposes).
        FreqNum: number of frequency bins.

    Returns:
        (pred_denorm, nrmse): de-normalized predictions shaped
        (FreqNum, n_samples) and the NRMSE per frequency bin.
    """
    model.eval()
    outputs = []
    with torch.no_grad():
        for window in X_test:
            batch = torch.FloatTensor(window).unsqueeze(0)  # add batch dim
            outputs.append(model(batch).numpy())

    # Stack the per-window outputs and reorganize into (FreqNum, n_samples).
    pred_reshaped = np.concatenate(outputs, axis=0).reshape(-1, FreqNum).T

    # Undo min-max scaling; the scaler expects features on the last axis.
    pred_denorm = scaler.inverse_transform(pred_reshaped.T).T
    y_test_denorm = scaler.inverse_transform(y_test.T).T

    # RMSE per frequency bin, normalized by each bin's original data range.
    rmse = np.sqrt(np.mean((pred_denorm - y_test_denorm) ** 2, axis=1))
    nrmse = rmse / (scaler.data_max_ - scaler.data_min_)

    return pred_denorm, nrmse


def plot_results(y_true, y_pred, freq_num, save_folder):
    """Plot true vs. predicted series for every frequency bin and save the figure.

    Args:
        y_true: array of shape (freq_num, n_samples) with ground-truth values.
        y_pred: array of the same shape with model predictions.
        freq_num: number of bins to plot; the fixed 3x5 grid fits at most 15.
        save_folder: directory the figure is written to (created if missing).

    Returns:
        Path of the saved image file.
    """
    # exist_ok avoids the check-then-create race of the original exists() test.
    os.makedirs(save_folder, exist_ok=True)
    plt.figure(figsize=(15, 10))
    for n in range(freq_num):
        plt.subplot(3, 5, n + 1)
        plt.plot(y_true[n], 'x-', label='True')
        plt.plot(y_pred[n], '.-', label='Predict')
        plt.title(f'Freq {n + 1}')
        plt.legend()
    plt.tight_layout()
    img_path = os.path.join(save_folder, "prediction_result.png")
    plt.savefig(img_path)  # save the figure into the requested folder
    plt.show()
    plt.close()
    return img_path
def prediction(FreqNum, StartFreq, TimeStep, PredStep, BatchSize, Epochs,
               LearningRate, Patience, filepath, output_dir):
    """End-to-end pipeline: load CSV spectrum data, train the MLP, evaluate, plot.

    Args:
        FreqNum: number of frequency bins (rows) to model.
        StartFreq: index of the first frequency bin to use.
        TimeStep: input window length.
        PredStep: prediction horizon passed to prepare_data.
        BatchSize: DataLoader batch size.
        Epochs: maximum training epochs.
        LearningRate: Adam learning rate.
        Patience: early-stopping patience.
        filepath: path of the CSV file to read (no header).
        output_dir: folder the result figure is saved into.

    Returns:
        Dict with per-bin NRMSE, average NRMSE, prediction time in ms,
        and the saved figure path.
    """
    # Fix the RNG seeds so runs are reproducible.
    np.random.seed(0)
    torch.manual_seed(0)

    # Load the CSV; after transposing, rows are frequency bins and
    # columns are time steps. Keep FreqNum rows starting at StartFreq.
    raw = pd.read_csv(filepath, header=None).values.T[StartFreq:StartFreq + FreqNum, :]

    # Min-max normalize (scaler works feature-last, hence the transposes).
    scaler = MinMaxScaler()
    data_normalized = scaler.fit_transform(raw.T).T

    # Chronological 70/15/15 split.
    train_data, val_data, test_data = train_val_test_split(
        data_normalized, train_ratio=0.7, val_ratio=0.15)

    # Sliding-window samples for each split.
    X_train, y_train = prepare_data(train_data, TimeStep, PredStep)
    X_val, y_val = prepare_data(val_data, TimeStep, PredStep)
    X_test, y_test = prepare_data(test_data, TimeStep, PredStep)

    # Loaders: shuffle only the training set.
    train_loader = DataLoader(TimeSeriesDataset(X_train, y_train),
                              batch_size=BatchSize, shuffle=True)
    val_loader = DataLoader(TimeSeriesDataset(X_val, y_val),
                            batch_size=BatchSize, shuffle=False)

    # Model, loss and optimizer.
    model = FCNet(TimeStep)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=LearningRate)

    print("Starting training...")
    model, train_losses, val_losses = train_model(
        model, train_loader, val_loader, optimizer, criterion,
        epochs=Epochs, patience=Patience
    )

    # Time the forward pass over the test windows.
    starttime = time.time()
    predictions, nrmse = evaluate_model(model, X_test, test_data[:, TimeStep:], scaler, FreqNum)
    endtime = time.time()

    # Plot de-normalized ground truth against the predictions and save it.
    img_path = plot_results(scaler.inverse_transform(test_data[:, TimeStep:].T).T,
                            predictions, FreqNum, output_dir)

    avg_nrmse = sum(nrmse) / FreqNum
    return {
        "nrmse_list": [round(n, 4) for n in nrmse],
        "avg_nrmse": round(avg_nrmse, 4),
        "predict_time_ms": int(round((endtime - starttime) * 1000)),
        "image_path": img_path
    }




if __name__ == "__main__":
    FreqNum = 15  # 15个频点
    StartFreq = 18  # 自己设置从哪个频点开始预测
    TimeStep = 50
    PredStep = 1
    BatchSize = 64
    Epochs = 100
    LearningRate = 0.001
    Patience = 7
    filepath='GSM1800up_data.csv'
    result=prediction(FreqNum,
    StartFreq ,
    TimeStep,
    PredStep,
    BatchSize,
    Epochs,
    LearningRate,
    Patience,filepath)


    # # 设置随机种子
    # np.random.seed(0)
    # torch.manual_seed(0)
    #
    # # 读取数据
    # data = pd.read_csv('GSM1800up_data.csv', header=None).values.T[StartFreq:StartFreq + FreqNum, :]
    #
    # # 数据预处理
    # scaler = MinMaxScaler()
    # data_normalized = scaler.fit_transform(data.T).T
    #
    # # 划分训练集、验证集和测试集
    # train_data, val_data, test_data = train_val_test_split(data_normalized, train_ratio=0.7, val_ratio=0.15)
    #
    # # 准备数据
    # X_train, y_train = prepare_data(train_data, TimeStep, PredStep)
    # X_val, y_val = prepare_data(val_data, TimeStep, PredStep)
    # X_test, y_test = prepare_data(test_data, TimeStep, PredStep)
    #
    # # 创建数据加载器
    # train_dataset = TimeSeriesDataset(X_train, y_train)
    # val_dataset = TimeSeriesDataset(X_val, y_val)
    #
    # train_loader = DataLoader(train_dataset, batch_size=BatchSize, shuffle=True)
    # val_loader = DataLoader(val_dataset, batch_size=BatchSize, shuffle=False)
    #
    # # 初始化模型、损失函数和优化器
    # model = FCNet(TimeStep)
    # criterion = nn.MSELoss()
    # optimizer = torch.optim.Adam(model.parameters(), lr=LearningRate)
    #
    # # 训练模型
    # print("Starting training...")
    # model, train_losses, val_losses = train_model(
    #     model, train_loader, val_loader, optimizer, criterion,
    #     epochs=Epochs, patience=Patience
    # )
    #
    # # 评估模型
    # starttime = time.time()
    # predictions, nrmse = evaluate_model(model, X_test, test_data[:, TimeStep:], scaler)
    # endtime = time.time()
    #
    # # 绘制并保存结果
    # plot_results(scaler.inverse_transform(test_data[:, TimeStep:].T).T, predictions, FreqNum)
    #
    # # 打印评估指标
    # print("15个频点的NRMSE:")
    # for i in range(len(nrmse)):
    #     print(f"  频点 {StartFreq + i}: {nrmse[i]:.4f}")
    # print(f"15个频点的平均NRMSE: {sum(nrmse) / 15:.4f}")
    #
    # print(f"15个频点的预测时间: {int(round((endtime - starttime) * 1000))}ms")