import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import os
from matplotlib.colors import LogNorm


def create_linear_model():
    """Build the feed-forward regression network (1 -> 32 -> 128 -> 64 -> 1).

    Despite the name, this is a small MLP: three hidden Linear layers with
    ReLU activations and a linear scalar output head.
    """
    hidden_shapes = [(1, 32), (32, 128), (128, 64)]
    layers = []
    for in_features, out_features in hidden_shapes:
        layers.append(nn.Linear(in_features, out_features))
        layers.append(nn.ReLU())
    layers.append(nn.Linear(64, 1))  # linear head: raw scalar for regression
    return nn.Sequential(*layers)

def data_Min_Max():
    """Load the dataset, shuffle, split 80/20, and min-max normalize.

    Normalization statistics (min/max) are computed from the training split
    only and applied to both splits.

    Returns:
        (train_x, train_y, test_x, test_y, min_y, max_y) — min_y/max_y are
        the training-set extremes of the target, which callers need in
        order to undo the normalization (denorm = value * (max-min) + min).
    """
    # Load the raw data from the Excel workbook.
    dataset = pd.read_excel(os.path.join('数据', 'DNN3_result.xlsx'),
                            sheet_name='Sheet1')

    x = dataset.iloc[:, 0].values.reshape(-1, 1).astype(float)
    y = dataset.iloc[:, 1].values.reshape(-1, 1).astype(float)

    # Shuffle with a fixed seed for reproducibility.
    np.random.seed(5)
    order = np.random.permutation(len(x))
    x, y = x[order], y[order]

    # 80/20 train/test split on the raw (un-normalized) values.
    cut = int(len(x) * 0.8)
    train_x_raw, test_x_raw = x[:cut], x[cut:]
    train_y_raw, test_y_raw = y[:cut], y[cut:]

    # Scaling statistics come from the training split only (no leakage).
    min_x, max_x = np.min(train_x_raw), np.max(train_x_raw)
    min_y, max_y = np.min(train_y_raw), np.max(train_y_raw)

    # Guard against a constant column (zero range) to avoid division by zero.
    range_x = max_x - min_x if (max_x - min_x) != 0 else 1.0
    range_y = max_y - min_y if (max_y - min_y) != 0 else 1.0

    # Apply min-max scaling to both splits with the training statistics.
    train_x = (train_x_raw - min_x) / range_x
    train_y = (train_y_raw - min_y) / range_y
    test_x = (test_x_raw - min_x) / range_x
    test_y = (test_y_raw - min_y) / range_y

    return train_x, train_y, test_x, test_y, min_y, max_y


# Training function
def train(epoch):
    """Run one optimization pass over the training data.

    Relies on module-level globals: model, train_loader, optimizer,
    loss_function and EPOCHS.
    """
    model.train()
    for features, target in train_loader:
        optimizer.zero_grad()
        loss = loss_function(model(features), target)
        loss.backward()  # backpropagate gradients
        optimizer.step()
    if epoch % 100 == 0:
        print("Epoch {} / {}".format(epoch, EPOCHS))
    return epoch


# Global plotting defaults (SCI-journal style figure formatting).
plt.rcParams.update({
    "font.family": "serif",  # serif body font
    "font.serif": ["Times New Roman"],  # use Times New Roman
    "font.size": 11,  # base font size
    "axes.labelsize": 11,  # axis-label font size
    "axes.titlesize": 12,  # title font size
    "xtick.labelsize": 10,  # x tick-label size
    "ytick.labelsize": 10,  # y tick-label size
    "xtick.direction": "in",  # ticks point inward
    "ytick.direction": "in",  # ticks point inward
    "axes.linewidth": 1.5,  # axis spine width
    "lines.linewidth": 1.5,  # default plot line width
    "savefig.dpi": 600,  # saved-figure resolution
    "mathtext.fontset": "stix",  # math font matches the body text
})


def plot_sci_scatter(true, pred):
    """Render a true-vs-predicted scatter plot colored by relative error
    and save it as PDF/F_DNN3.pdf.

    Assumes true and pred are 1-D numeric arrays of equal length
    (flattened predictions) — TODO confirm against caller.
    """
    fig, ax = plt.subplots(figsize=(4.5, 4.5), dpi=600)

    # ============== Relative error (color dimension) ==============
    epsilon = 1e-8  # avoid division by zero
    # NOTE(review): the epsilon guard shifts the denominator; if a true
    # value equals -epsilon this still divides by ~0 — assumed non-negative
    # targets, confirm with the data.
    relative_error = np.abs((pred - true) / (true + epsilon))  # absolute relative error

    # ============== Scatter, color-mapped by relative error ==============
    sc = ax.scatter(
        true, pred,
        c=relative_error,  # color encodes relative error
        s=35,
        cmap='viridis',  # single-hue gradient colormap
        alpha=0.8,
        edgecolors='w',
        linewidths=0.4,
        norm=LogNorm(vmin=1e-4, vmax=1),  # log scale: errors span orders of magnitude
        zorder=3
    )

    # ============== y = x reference line ==============
    # Robust axis range: clip the extreme 1% on each side so the main data
    # region fills the plot.
    data_min = np.percentile([true, pred], 1)  # 1st percentile
    data_max = np.percentile([true, pred], 99)  # 99th percentile
    ax.plot(
        [data_min, data_max], [data_min, data_max],
        '--',  # dashed line
        color='#2d2d2d',  # dark gray for contrast
        lw=1.2,  # slightly thicker than default
        alpha=0.9,  # slightly transparent
        zorder=2  # drawn beneath the scatter points
    )

    # ============== Colorbar ==============
    cbar = fig.colorbar(sc,ax=ax,pad=0.02,aspect=30)
    cbar.ax.tick_params(labelsize=9)  # colorbar tick-label size
    cbar.set_label('Relative Error', fontsize=10, labelpad=2)

    # ============== Axes and labels ==============
    ax.set(
        xlim=(data_min, data_max),  # x range matches the reference line
        ylim=(data_min, data_max),  # same range -> square aspect
        xlabel='True Value',  # replace with the actual quantity name
        ylabel='Predicted Value'
    )

    # ============== Panel label annotation ==============
    stats_text = ('(b)')
    ax.text(
        0.05, 0.95,  # upper-left corner (axes-relative coordinates)
        stats_text,
        transform=ax.transAxes,  # use axes-relative coordinate system
        ha='left', va='top',  # left-aligned, top-anchored
        fontsize=10,
        bbox=dict(
            facecolor='white', alpha=0.9,  # semi-transparent white background
            edgecolor='0.8',  # light gray border
            boxstyle='round,pad=0.3'  # rounded box, 0.3 padding
        )
    )

    # ============== Grid and spines ==============
    ax.grid(True,ls=':',color='gray',alpha=0.4)
    # Thicken the spines to match the tick styling.
    for spine in ax.spines.values():
        spine.set_linewidth(1.2)

    # ============== Layout and save ==============
    plt.tight_layout(pad=1.5)  # compact layout with controlled margins

    # Create the output directory if it does not exist.
    os.makedirs('PDF', exist_ok=True)

    # Save the figure (vector PDF only, suitable for editing).
    save_path = os.path.join('PDF', 'F_DNN3')
    plt.savefig(f'{save_path}.pdf', transparent=True)  # vector output
    plt.close()  # close explicitly to release figure memory

def test(model, test_loader, mean_y, std_y):
    """Evaluate the model on test_loader, print metrics, save results to
    Excel and render the scatter plot.

    NOTE: the last two parameters keep their historical names mean_y/std_y,
    but this pipeline uses MIN-MAX scaling — callers pass the training-set
    minimum (as mean_y) and maximum (as std_y) of the target. The correct
    inverse transform is therefore ``value * (max - min) + min``.

    BUG FIX: the original code denormalized with ``value * std_y + mean_y``
    (= value * max + min), the z-score formula, which rescales every output
    incorrectly under min-max normalization.
    """
    model.eval()
    scale = std_y - mean_y  # min-max range (max_y - min_y)
    pre_outputs = []
    true_outputs = []

    with torch.no_grad():
        for features, target in test_loader:
            output = model(features)
            # Undo the min-max normalization before computing metrics.
            pre_outputs.append(output.cpu().numpy() * scale + mean_y)
            true_outputs.append(target.cpu().numpy() * scale + mean_y)

    pre_outputs = np.concatenate(pre_outputs).flatten()
    true_outputs = np.concatenate(true_outputs).flatten()

    # Regression metrics on the de-normalized values.
    mse = mean_squared_error(true_outputs, pre_outputs)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(true_outputs, pre_outputs)
    r2 = r2_score(true_outputs, pre_outputs)
    # NOTE(review): MAPE divides by the true values and blows up if any
    # target is zero — assumed non-zero targets, confirm with the data.
    mape = np.mean(np.abs((true_outputs - pre_outputs) / true_outputs) * 100)

    print(f"MSE: {mse:.4f}, RMSE: {rmse:.4f}")
    print(f"MAE: {mae:.4f}, R²: {r2:.4f},MAPE:{mape:.4f}")

    # Persist per-sample results for later inspection.
    df = pd.DataFrame({
        'Actual': true_outputs,
        'Predicted': pre_outputs,
        'Error': true_outputs - pre_outputs,
        'Relative Error': (true_outputs - pre_outputs) / true_outputs
    })
    os.makedirs('结果', exist_ok=True)
    df.to_excel(os.path.join('结果', 'F_DNN2_Results.xlsx'), index=False)

    # Render the publication-style scatter plot.
    plot_sci_scatter(true_outputs, pre_outputs)

if __name__ == "__main__":
    # 数据生成
    train_inputs,train_outputs ,test_inputs,test_outputs,mean_y, std_y= data_Min_Max()
    # 数据集封装

    train_dataset = TensorDataset(torch.tensor(train_inputs, dtype=torch.float32),
                                 torch.tensor(train_outputs, dtype=torch.float32))
    test_dataset = TensorDataset(torch.tensor(test_inputs, dtype=torch.float32),
                                  torch.tensor(test_outputs, dtype=torch.float32))
    # 数据加载器
    BATCH_SIZE = 32
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=False)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)
    # 初始化模型、优化器和损失函数
    model = create_linear_model()
    #几种学习率下降的算法
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

    loss_function = nn.MSELoss(reduction='mean')
    # 超参数设置
    EPOCHS =3000
    losts, epoch_records = [], []
    for epoch in range(EPOCHS):
        train(epoch)

    torch.save(model.state_dict(), os.path.join('模型', 'F_DNN3_model.pth'))

    # 从文件夹加载模型
    model.load_state_dict(torch.load(os.path.join('模型', 'F_DNN3_model.pth')))

    model.eval()
    test(model, test_loader,mean_y, std_y)