import numpy as np
import pandas as pd


def entropy_weight_method(data):
    """Compute objective indicator weights with the entropy weight method.

    Parameters
    ----------
    data : array-like or DataFrame of shape (n_samples, n_indicators)
        Rows are samples, columns are indicators.

    Returns
    -------
    weights : ndarray of shape (n_indicators,)
        Weight of each indicator; sums to 1.
    entropies : ndarray of shape (n_indicators,)
        Information entropy of each indicator.
    """
    matrix = np.array(data)

    # Min-max normalize each column; the 1e-10 terms guard against
    # division by zero (constant column) and log(0) below.
    col_min = np.min(matrix, axis=0)
    col_max = np.max(matrix, axis=0)
    normalized = (matrix - col_min) / (col_max - col_min + 1e-10) + 1e-10

    # Proportion of each sample within its indicator column.
    proportions = normalized / np.sum(normalized, axis=0)

    # Per-indicator information entropy, scaled by 1/ln(n_samples)
    # so entropy falls in [0, 1].
    scale = 1 / np.log(normalized.shape[0])
    entropies = -scale * np.sum(proportions * np.log(proportions), axis=0)

    # Redundancy (degree of divergence) drives the weights.
    redundancy = 1 - entropies
    weights = redundancy / np.sum(redundancy)

    return weights, entropies


# 示例使用
if __name__ == "__main__":
    # Reproducible demo: 80 samples x 10 indicators of random data.
    np.random.seed(42)
    data = np.random.rand(80, 10) * 100

    # Wrap in a DataFrame with labeled columns (optional for the method).
    columns = [f'指标{i + 1}' for i in range(10)]
    df = pd.DataFrame(data, columns=columns)

    # Compute weights and entropies. (The previous inline re-computation
    # of the method's body here was dead code — its results were
    # immediately overwritten by this call — and has been removed.)
    weights, e_j = entropy_weight_method(df)

    # Print per-indicator entropy.
    print("各指标信息熵:")
    for i, entropy in enumerate(e_j):
        print(f"指标{i + 1}: {entropy:.4f}")

    # Print per-indicator weight.
    print("\n各指标权重:")
    for i, weight in enumerate(weights):
        print(f"指标{i + 1}: {weight:.4f}")

    # Sanity check: weights should sum to 1.
    print(f"\n权重总和: {np.sum(weights):.4f}")