import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import os


def create_linear_model():
    """Build the feed-forward regression network: 1 -> 32 -> 128 -> 64 -> 1 with ReLU."""
    layer_sizes = [1, 32, 128, 64, 1]
    layers = []
    for in_dim, out_dim in zip(layer_sizes[:-1], layer_sizes[1:]):
        layers.append(nn.Linear(in_dim, out_dim))
        layers.append(nn.ReLU())
    layers.pop()  # no activation after the final output layer
    return nn.Sequential(*layers)

def data_Min_Max():
    """Load the Excel dataset, standardize it, shuffle, and split 80/20.

    NOTE(review): despite the name, this performs z-score standardization
    (mean/std), not min-max scaling — keep the name for caller compatibility.

    Returns:
        train_x, train_y, test_x, test_y: standardized (N, 1) float arrays.
        mean_y, std_y: target statistics needed to de-normalize predictions.
    """
    input_path = os.path.join('数据', '数据.xlsx')
    dataset = pd.read_excel(input_path, sheet_name='Sheet1')

    # Column 0 is the input feature, column 5 is the regression target.
    x = dataset.iloc[:, 0].values.reshape(-1, 1).astype(float)
    y = dataset.iloc[:, 5].values.reshape(-1, 1).astype(float)

    mean_x, std_x = np.mean(x), np.std(x)
    mean_y, std_y = np.mean(y), np.std(y)

    # Standardize both variables (assumes non-constant columns, std != 0).
    x_normalized = (x - mean_x) / std_x
    y_normalized = (y - mean_y) / std_y

    # Shuffle with a fixed seed so the train/test split is reproducible.
    np.random.seed(7)
    indices = np.random.permutation(len(x_normalized))
    x_normalized = x_normalized[indices]
    y_normalized = y_normalized[indices]

    # 80/20 train/test split on the shuffled data.
    split_index = int(len(x_normalized) * 0.8)
    train_x, test_x = x_normalized[:split_index], x_normalized[split_index:]
    train_y, test_y = y_normalized[:split_index], y_normalized[split_index:]

    return train_x, train_y, test_x, test_y, mean_y, std_y



# Training function
def train(epoch):
    """Run one training epoch over the module-level `train_loader`.

    Relies on the module-level `model`, `optimizer`, `loss_function`,
    and `EPOCHS` defined in the `__main__` section.

    Args:
        epoch: current epoch index, used only for periodic logging.

    Returns:
        The epoch number on every 100th epoch (when progress is printed),
        otherwise None.
    """
    model.train()
    for inputs, targets in train_loader:  # avoid shadowing builtin `input`
        optimizer.zero_grad()
        predictions = model(inputs)
        loss = loss_function(predictions, targets)
        loss.backward()  # backpropagate gradients
        optimizer.step()
    # Log progress every 100 epochs (including epoch 0).
    if epoch % 100 == 0:
        print("Epoch {} / {}".format(epoch, EPOCHS))
        return epoch

# Evaluation helpers
def _collect_predictions(model, test_loader, mean_y, std_y):
    """Run inference over `test_loader`; return de-normalized (pred, true) flat arrays."""
    model.eval()
    pre_outputs = []   # de-normalized predictions
    true_outputs = []  # de-normalized ground truth
    with torch.no_grad():
        for inputs, targets in test_loader:  # avoid shadowing builtin `input`
            outputs = model(inputs)
            # Undo the z-score normalization to get values in the original scale.
            pre_outputs.append(outputs.cpu().numpy() * std_y + mean_y)
            true_outputs.append(targets.cpu().numpy() * std_y + mean_y)
    return (np.concatenate(pre_outputs, axis=0).flatten(),
            np.concatenate(true_outputs, axis=0).flatten())


def _print_metrics(true_outputs, pre_outputs):
    """Print MSE / RMSE / MAE / R² for de-normalized predictions."""
    mse = mean_squared_error(true_outputs, pre_outputs)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(true_outputs, pre_outputs)
    r2 = r2_score(true_outputs, pre_outputs)

    print(f"Mean Squared Error (MSE): {mse:.8f}")
    print(f"Root Mean Squared Error (RMSE): {rmse:.8f}")
    print(f"Mean Absolute Error (MAE): {mae:.8f}")
    print(f"  R² Score: {r2:.8f}")


def _save_error_report(true_outputs, pre_outputs):
    """Save predicted/actual values plus absolute and relative errors to Excel."""
    df = pd.DataFrame({
        'Predicted': pre_outputs,
        'Actual': true_outputs,
    })
    df['差值'] = df['Actual'] - df['Predicted']
    # NOTE(review): divides by Actual — produces inf/nan if any actual value
    # is zero; confirm the target column is never zero.
    df['相对误差'] = df['差值'] / df['Actual']
    df['相对误差'] = df['相对误差'].apply(lambda x: f"{x * 100:.2f}%")
    os.makedirs('结果', exist_ok=True)
    saving_path_results = os.path.join('结果', 'B_DNN5_相对误差.xlsx')
    df.to_excel(saving_path_results, index=False)


def _plot_results(true_outputs, pre_outputs):
    """Show the actual-vs-predicted scatter and the error-distribution histogram."""
    plt.figure(figsize=(10, 6))
    plt.scatter(true_outputs, pre_outputs, alpha=0.6)
    # Diagonal reference line: perfect predictions lie on it.
    plt.plot([min(true_outputs), max(true_outputs)],
             [min(true_outputs), max(true_outputs)], 'r--')
    plt.xlabel('Actual Values')
    plt.ylabel('Predicted Values')
    plt.title('Actual vs Predicted Values')
    plt.show()

    # Error distribution histogram
    plt.figure(figsize=(10, 6))
    errors = true_outputs - pre_outputs
    plt.hist(errors, bins=30)
    plt.xlabel('Prediction Error')
    plt.ylabel('Count')
    plt.title('Error Distribution')
    plt.show()


def test(model, test_loader, mean_y, std_y):
    """Evaluate `model` on `test_loader`: print metrics, export an Excel
    error report, and show diagnostic plots.

    Args:
        model: trained network mapping normalized x to normalized y.
        test_loader: DataLoader yielding (input, target) normalized tensors.
        mean_y, std_y: target statistics used to de-normalize outputs.
    """
    pre_outputs, true_outputs = _collect_predictions(model, test_loader, mean_y, std_y)
    _print_metrics(true_outputs, pre_outputs)
    _save_error_report(true_outputs, pre_outputs)
    _plot_results(true_outputs, pre_outputs)


if __name__ == "__main__":
    # Load, standardize, shuffle, and split the dataset.
    train_inputs, train_outputs, test_inputs, test_outputs, mean_y, std_y = data_Min_Max()

    # Wrap the numpy arrays as tensor datasets.
    train_dataset = TensorDataset(torch.tensor(train_inputs, dtype=torch.float32),
                                  torch.tensor(train_outputs, dtype=torch.float32))
    test_dataset = TensorDataset(torch.tensor(test_inputs, dtype=torch.float32),
                                 torch.tensor(test_outputs, dtype=torch.float32))

    # Data loaders. The data was already shuffled once in data_Min_Max with a
    # fixed seed, so shuffle=False keeps runs reproducible.
    BATCH_SIZE = 32
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=False)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)

    # Model, optimizer, and loss function.
    model = create_linear_model()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    loss_function = nn.MSELoss(reduction='mean')

    # Training loop.
    EPOCHS = 3000
    for epoch in range(EPOCHS):
        train(epoch)

    # Save the trained weights. torch.save does not create missing
    # directories, so make sure the folder exists first (bug fix).
    os.makedirs('模型', exist_ok=True)
    model_path = os.path.join('模型', 'B_DNN5_model.pth')
    torch.save(model.state_dict(), model_path)

    # Reload the weights and evaluate on the held-out split.
    model.load_state_dict(torch.load(model_path))
    model.eval()
    test(model, test_loader, mean_y, std_y)