import numpy as np
import pandas as pd

# Load the raw Titanic datasets.
train_data = pd.read_csv('data/train.csv')
test_data = pd.read_csv('data/test.csv')

# Impute missing values.  Assign the result back rather than calling
# fillna(..., inplace=True) on df['col']: that chained form mutates an
# intermediate Series, emits a FutureWarning on pandas 2.x, and under
# pandas 3.0 copy-on-write silently operates on a copy, leaving the
# DataFrame unchanged.
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].median())
train_data['Embarked'] = train_data['Embarked'].fillna(train_data['Embarked'].mode()[0])
test_data['Age'] = test_data['Age'].fillna(test_data['Age'].median())
test_data['Fare'] = test_data['Fare'].fillna(test_data['Fare'].median())

# Encode 'Sex' and 'Embarked' as small integers so every feature is numeric.
train_data['Sex'] = train_data['Sex'].map({'male': 0, 'female': 1})
test_data['Sex'] = test_data['Sex'].map({'male': 0, 'female': 1})

train_data['Embarked'] = train_data['Embarked'].map({'C': 0, 'Q': 1, 'S': 2})
test_data['Embarked'] = test_data['Embarked'].map({'C': 0, 'Q': 1, 'S': 2})

# Build the feature matrix and label vector.  Only X_test is built for the
# test split — no y_test (the test CSV presumably has no 'Survived' column).
features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
X_train = train_data[features].values
y_train = train_data['Survived'].values
X_test = test_data[features].values


# values, counts = np.unique(X_train[0], return_counts=True)
# for i, value in enumerate(values):
#     print(i, "_", value)
#     print("X_train[0]: ", X_train[0])
#     print('X_train[0] == values[i]: ', X_train[0] == values[i])
# print(values)
# print(counts)
# print('')


def entropy(y):
    """
    Shannon entropy (base 2) of a label array.
    :param y: array-like of target values
    :return: entropy in bits, as a numpy float
    """
    _, counts = np.unique(y, return_counts=True)
    probabilities = counts / counts.sum()
    return -(probabilities * np.log2(probabilities)).sum()


def information_gain(y, X_feat):
    """
    Information gain obtained by splitting the labels on one feature.
    :param y: array of target values
    :param X_feat: one feature column of X, aligned with y
    :return: parent entropy minus the weighted entropy of the partitions
    """
    def _entropy(labels):
        # Shannon entropy (base 2) of a label array.
        _, c = np.unique(labels, return_counts=True)
        p = c / len(labels)
        return -np.sum(p * np.log2(p))

    # Entropy of the whole label set before splitting.
    parent = _entropy(y)

    # Weighted entropy of each partition induced by the feature's values.
    total = len(X_feat)
    values, counts = np.unique(X_feat, return_counts=True)
    children = 0.0
    for value, count in zip(values, counts):
        children += (count / total) * _entropy(y[X_feat == value])

    return parent - children


def best_feat_to_split(X, y):
    """
    Pick the feature column that best separates the labels.
    :param X: 2-D feature matrix (rows = samples, columns = features)
    :param y: array of target values
    :return: index (numpy int) of the column with the largest information gain
    """
    n_features = X.shape[1]
    gains = [information_gain(y, X[:, col]) for col in range(n_features)]
    # argmax returns the first maximising index on ties.
    return np.argmax(gains)


# Report which feature column gives the largest information gain
# for the first (root) split of the training data.
xx = best_feat_to_split(X_train, y_train)
print(xx)

# class DecisionTreeID3:
#     def __init__(self, depth=0, max_depth=None):
