import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
import math
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import os


# Model definition
def create_linear_model():
    """Build the 1 -> 32 -> 128 -> 64 -> 1 fully-connected regressor.

    Hidden layers are followed by ReLU activations; the final output
    layer is linear (no activation).
    """
    layer_sizes = [1, 32, 128, 64, 1]
    layers = []
    for fan_in, fan_out in zip(layer_sizes[:-1], layer_sizes[1:]):
        layers.append(nn.Linear(fan_in, fan_out))
        layers.append(nn.ReLU())
    layers.pop()  # drop the trailing ReLU so the output stays linear
    return nn.Sequential(*layers)

def data_Min_Max():
    """Load the Excel dataset, standardise it, shuffle, and split 80/20.

    NOTE(review): despite the name, this applies z-score standardisation
    (mean/std), not min-max scaling.

    Returns:
        (train_x, train_y, test_x, test_y, mean_y, std_y): normalised
        train/test splits plus the target statistics needed later to
        de-normalise predictions.
    """
    input_path = os.path.join('数据', '数据.xlsx')
    dataset = pd.read_excel(input_path, sheet_name='Sheet1')

    # Column 0 is the single input feature, column 5 the regression target.
    x = dataset.iloc[:, 0].values.reshape(-1, 1).astype(float)
    y = dataset.iloc[:, 5].values.reshape(-1, 1).astype(float)

    mean_x, std_x = np.mean(x), np.std(x)
    mean_y, std_y = np.mean(y), np.std(y)

    x_normalized = (x - mean_x) / std_x
    y_normalized = (y - mean_y) / std_y

    # Deterministic shuffle so runs are reproducible.
    np.random.seed(7)
    order = np.random.permutation(len(x_normalized))
    x_normalized = x_normalized[order]
    y_normalized = y_normalized[order]

    # 80/20 train/test split after shuffling.
    cut = int(len(x_normalized) * 0.8)
    train_x, test_x = x_normalized[:cut], x_normalized[cut:]
    train_y, test_y = y_normalized[:cut], y_normalized[cut:]

    return train_x, train_y, test_x, test_y, mean_y, std_y

# Levy flight: generate heavy-tailed random step sizes
def levy_flight(dim, beta=1.2):
    """Draw a Levy-distributed random step vector via Mantegna's algorithm.

    Args:
        dim: number of components in the step vector.
        beta: stability index of the Levy distribution (0 < beta <= 2).
            Defaults to 1.2, the value hard-coded in the original tuning,
            so existing callers are unaffected.

    Returns:
        numpy array of shape (dim,) with heavy-tailed random steps.
    """
    # Mantegna's sigma for the numerator Gaussian.
    sigma = (math.gamma(1 + beta) * np.sin(np.pi * beta / 2) / (
                math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2))) ** (1 / beta)
    u = np.random.normal(0, sigma, dim)
    v = np.random.normal(0, 1, dim)
    # Ratio of Gaussians raised to 1/beta yields the Levy step.
    step = u / (np.abs(v) ** (1 / beta))
    return step


# Gradient-descent local refinement (core improvement)
def gradient_descent_step(model, weights, train_loader, lr=0.01, steps=10, loss_fn=None):
    """Refine a flat weight vector with a few epochs of Adam.

    Args:
        model: network whose parameter layout matches *weights*.
        weights: flat numpy vector of all model parameters.
        train_loader: DataLoader yielding (inputs, targets) batches.
        lr: Adam learning rate.
        steps: number of full passes over *train_loader*.
        loss_fn: loss callable. Defaults to the module-level
            ``loss_function`` — the original implementation silently
            depended on that global; the parameter makes the dependency
            explicit while staying backward compatible.

    Returns:
        The updated parameters, flattened back into one numpy vector.
    """
    if loss_fn is None:
        loss_fn = loss_function  # fall back to the global defined in __main__
    set_weight(model, weights)  # load the candidate weights into the model
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    model.train()  # hoisted: mode only needs to be set once
    for _ in range(steps):
        for inputs, targets in train_loader:
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_fn(outputs, targets)
            loss.backward()
            optimizer.step()

    # Flatten the updated parameters back into a single vector.
    return np.concatenate([param.detach().cpu().numpy().flatten() for param in model.parameters()])


# Cuckoo search algorithm (with dynamic step size and local refinement)
def cuckoo_search(model, train_loader, loss_func, n_nest=100, pa=0.25, max_iter=3000):
    """Optimise the model's weights with cuckoo search plus gradient refinement.

    Each "nest" is a flat weight vector for *model*. Every iteration performs
    a global Levy-flight search over all nests, then a local phase that
    perturbs a fraction *pa* of nests and refines them with a few Adam steps.

    Args:
        model: network whose weights are being optimised (used as scratch space).
        train_loader: DataLoader of (inputs, targets) used for fitness evaluation.
        loss_func: loss callable used by evaluate_model for fitness.
        n_nest: population size.
        pa: probability that a nest enters the local/abandonment phase.
        max_iter: maximum number of iterations.

    Returns:
        The best flat weight vector found.
    """
    # Initialise the population as small Gaussian perturbations of the
    # model's current weights.
    init_weights = [p.detach().cpu().numpy() for p in model.parameters()]
    dim = sum(w.size for w in init_weights)
    init_flat = np.concatenate([w.flatten() for w in init_weights])

    nests = np.array([init_flat + np.random.randn(dim) * 0.01 for _ in range(n_nest)])
    best_nest = nests[0].copy()
    best_loss = evaluate_model(model, train_loader, loss_func, best_nest)

    for epoch in range(max_iter):
        # Exponential decay shrinks both the Levy step scale and the
        # learning rate of the local gradient refinement over time.
        decay = np.exp(-epoch / (max_iter * 0.5))
        lr = 0.001 * decay

        # ---------- Global search (Levy flights) ----------
        for i in range(n_nest):
            # Levy-flight step, further randomised per component below.
            step = 0.1 * decay * levy_flight(dim)
            new_nest = nests[i] + step * np.random.randn(dim)

            # NOTE(review): a nest is replaced only when it beats the
            # GLOBAL best — stricter than classic cuckoo search, which
            # replaces when the candidate beats that nest's own fitness.
            curr_loss = evaluate_model(model, train_loader, loss_func, new_nest)
            if curr_loss < best_loss:
                best_nest = new_nest.copy()
                best_loss = curr_loss
                nests[i] = new_nest.copy()

        # ---------- Local search (abandon a fraction pa of nests) ----------
        for j in range(n_nest):
            if np.random.rand() < pa:
                # Differential perturbation between two distinct random nests.
                r1, r2 = np.random.choice([k for k in range(n_nest) if k != j], 2, False)
                e = np.random.rand()
                H = 1 if (e - pa) > 0 else 0
                # Dynamic perturbation factor, decaying with the iteration
                # count (replaces an earlier purely random scaling).
                perturbation_factor = 0.5 * decay  # perturbation strength decays over iterations
                perturbation = perturbation_factor * H * (nests[r1] - nests[r2])

                # Candidate solution, refined by a few Adam steps.
                # NOTE(review): gradient_descent_step reads the module-level
                # ``loss_function`` rather than ``loss_func`` — confirm the
                # two are the same object in the caller.
                new_nest = nests[j] + perturbation
                new_nest = gradient_descent_step(model, new_nest, train_loader, lr=lr)

                # Greedy replacement: keep the candidate only if it improves
                # this nest; also track the global best.
                new_loss = evaluate_model(model, train_loader, loss_func, new_nest)
                old_loss = evaluate_model(model, train_loader, loss_func, nests[j])
                if new_loss < old_loss:
                    nests[j] = new_nest.copy()
                    if new_loss < best_loss:
                        best_nest = new_nest.copy()
                        best_loss = new_loss

        # Early stopping once the best loss has plateaued. prev_loss is
        # first assigned at the end of epoch 0, so the epoch > 500 guard
        # keeps this reference safe.
        if epoch > 500 and abs(prev_loss - best_loss) < 1e-6:
            break
        prev_loss = best_loss
        if (epoch) % 100 == 0:
            print("Epoch,loss",epoch,best_loss)

    return best_nest


# Helper functions
def evaluate_model(model, train_loader, loss_function, weights):
    """Return the mean per-batch loss of *model* with *weights* loaded.

    The flat weight vector is written into the model's parameters, then the
    model is run in eval mode (no gradients) over every batch of
    *train_loader*; the average of the per-batch losses is returned.
    """
    set_weight(model, weights)
    model.eval()
    running = 0.0
    with torch.no_grad():
        for batch_inputs, batch_targets in train_loader:
            batch_loss = loss_function(model(batch_inputs), batch_targets)
            running += batch_loss.item()
    return running / len(train_loader)


def set_weight(model, weights):
    """Load a flat weight vector into a model's parameters, in declaration order.

    Args:
        model: network whose parameters are overwritten in place.
        weights: 1-D numpy array whose length equals the total number of
            model parameters; consumed sequentially, one slice per parameter.

    BUG FIX: the original rebound ``param.data`` to a freshly created CPU
    float32 tensor, which silently moved parameters off their device.
    ``param.copy_()`` under ``no_grad`` writes in place, preserving the
    parameter's device and dtype.
    """
    idx = 0
    with torch.no_grad():
        for param in model.parameters():
            param_size = param.numel()
            chunk = np.asarray(weights[idx:idx + param_size], dtype=np.float32)
            param.copy_(torch.from_numpy(chunk.reshape(param.shape)))
            idx += param_size

# Evaluation / test routine
def test(model, test_loader, mean_y, std_y):
    """Evaluate the trained model on the held-out set.

    Predictions and targets are de-normalised back to the original scale
    (value * std_y + mean_y), regression metrics are printed, a per-sample
    error table is written to Excel, and two diagnostic plots are shown.

    Args:
        model: trained network producing normalised predictions.
        test_loader: DataLoader over normalised (input, target) batches.
        mean_y: target mean used during normalisation.
        std_y: target standard deviation used during normalisation.
    """
    model.eval()
    predicted_batches = []
    actual_batches = []
    with torch.no_grad():
        for batch_x, batch_y in test_loader:
            # De-normalise both predictions and ground truth.
            predicted_batches.append(model(batch_x).cpu().numpy() * std_y + mean_y)
            actual_batches.append(batch_y.cpu().numpy() * std_y + mean_y)

    # Merge the per-batch arrays into flat 1-D vectors.
    pre_outputs = np.concatenate(predicted_batches, axis=0).flatten()
    true_outputs = np.concatenate(actual_batches, axis=0).flatten()

    mse = mean_squared_error(true_outputs, pre_outputs)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(true_outputs, pre_outputs)
    r2 = r2_score(true_outputs, pre_outputs)

    print(f"Mean Squared Error (MSE): {mse:.8f}")
    print(f"Root Mean Squared Error (RMSE): {rmse:.8f}")
    print(f"Mean Absolute Error (MAE): {mae:.8f}")
    print(f"  R² Score: {r2:.8f}")

    # Per-sample error table, saved to Excel.
    df = pd.DataFrame({
        'Predicted': pre_outputs.flatten(),
        'Actual': true_outputs.flatten(),
    })
    df['差值'] = df['Actual'] - df['Predicted']
    df['相对误差'] = df['差值'] / df['Actual']
    df['相对误差'] = df['相对误差'].apply(lambda x: f"{x * 100:.2f}%")
    os.makedirs('结果', exist_ok=True)
    saving_path_results = os.path.join('结果', 'B_CS5_相对误差.xlsx')
    df.to_excel(saving_path_results, index=False)

    # Scatter of predictions vs. ground truth with the identity line.
    plt.figure(figsize=(10, 6))
    plt.scatter(true_outputs, pre_outputs, alpha=0.6)
    axis_lo, axis_hi = min(true_outputs), max(true_outputs)
    plt.plot([axis_lo, axis_hi], [axis_lo, axis_hi], 'r--')
    plt.xlabel('Actual Values')
    plt.ylabel('Predicted Values')
    plt.title('Actual vs Predicted Values')
    plt.show()

    # Histogram of the prediction errors.
    plt.figure(figsize=(10, 6))
    plt.hist(true_outputs - pre_outputs, bins=30)
    plt.xlabel('Prediction Error')
    plt.ylabel('Count')
    plt.title('Error Distribution')
    plt.show()

if __name__ == "__main__":
    # Load the normalised train/test splits plus the target statistics
    # needed to de-normalise predictions later.
    train_inputs, train_outputs, test_inputs, test_outputs, mean_y, std_y = data_Min_Max()
    train_dataset = TensorDataset(torch.tensor(train_inputs, dtype=torch.float32),
                                  torch.tensor(train_outputs, dtype=torch.float32))
    test_dataset = TensorDataset(torch.tensor(test_inputs, dtype=torch.float32),
                                 torch.tensor(test_outputs, dtype=torch.float32))
    BATCH_SIZE = 32
    # shuffle=False: the data was already shuffled once in data_Min_Max.
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=False)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)

    model = create_linear_model()
    # Module-level global: gradient_descent_step reads this name directly.
    loss_function = nn.MSELoss(reduction='mean')

    # Hybrid cuckoo-search / gradient-descent training, then load the
    # best weight vector found into the model.
    best_weights = cuckoo_search(model, train_loader, loss_function)
    set_weight(model, best_weights)

    # BUG FIX: the checkpoint directory was never created, so torch.save
    # crashed on a fresh checkout; also removed a duplicate path assignment.
    os.makedirs('模型', exist_ok=True)
    model_path = os.path.join('模型', "B_cs5_model.pth")
    torch.save(model.state_dict(), model_path)
    # Round-trip the checkpoint to confirm it loads cleanly before testing.
    model.load_state_dict(torch.load(model_path))

    test(model, test_loader, mean_y, std_y)