from math import log
import operator


def calcShannonEnt(dataSet):
    """Return the Shannon entropy of the label distribution in dataSet.

    The label of each sample is taken as ``featVec[-1][0]`` — the first
    element of the sample's last column (the dataset stores labels in
    indexable containers).
    """
    total = len(dataSet)
    counts = {}
    for sample in dataSet:
        label = sample[-1][0]
        counts[label] = counts.get(label, 0) + 1
    # H = -sum(p * log2(p)) over the observed label frequencies.
    return -sum((c / total) * log(c / total, 2) for c in counts.values())


def splitDataSet(dataSet, axis, value):
    """Return the samples whose feature at ``axis`` equals ``value``,
    with that feature column removed from each returned sample.
    """
    return [
        vec[:axis] + vec[axis + 1:]
        for vec in dataSet
        if vec[axis] == value
    ]



def chooseBestFeatureToSplit(dataSet):
    """Return the index of the feature with the highest information gain.

    The last column of each sample is treated as the label (see
    calcShannonEnt). Returns -1 when no feature yields positive gain.
    """
    numFeatures = len(dataSet[0]) - 1
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        # Collect the distinct values of feature i, keeping the ORIGINAL
        # objects. Lists are hashed through a tuple key only.
        # Fix: the previous code put tuple-converted values into the set and
        # then passed those tuples to splitDataSet, where `list == tuple` is
        # always False — every subset came back empty, newEntropy was 0, and
        # a list-valued feature got a bogus maximal gain.
        seen = set()
        uniqueVals = []
        for example in dataSet:
            value = example[i]
            key = tuple(value) if isinstance(value, list) else value
            if key not in seen:
                seen.add(key)
                uniqueVals.append(value)
        # Weighted entropy of the split induced by feature i.
        newEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
        infoGain = baseEntropy - newEntropy
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature


def majorityCnt(classList):
    """Return the most frequent class in classList.

    Each class value is converted to a tuple (so unhashable containers can
    be counted); the tuple form is what gets returned. Ties go to the class
    seen first.
    """
    tally = {}
    for vote in classList:
        label = tuple(vote)
        tally[label] = tally.get(label, 0) + 1
    # max() returns the first key with the highest count, matching the
    # stable descending sort the previous implementation relied on.
    return max(tally, key=tally.get)



def createTree(dataSet, labels, featLabels):
    """Build a decision tree (nested dicts) over dataSet.

    NOTE(review): here the class is read from example[-1], unlike ID3 which
    reads example[0] — confirm which data layout this variant is meant for.
    ``labels`` is mutated (the chosen label is deleted); ``featLabels``
    records the split order.
    """
    classList = [example[-1] for example in dataSet]
    # All samples share one class: return it as a leaf.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # Only the label column remains: majority vote.
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    featLabels.append(bestFeatLabel)
    myTree = {bestFeatLabel: {}}
    del (labels[bestFeat])
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    # Recursively build the subtrees. Fix: pass a COPY of labels to each
    # recursive call — the callee deletes its chosen label, so sharing the
    # list corrupted the label indices for sibling branches.
    for value in uniqueVals:
        subLabels = labels[:]
        myTree[bestFeatLabel][value] = ID3(splitDataSet(dataSet, bestFeat, value), subLabels, featLabels)
    return myTree


def classify(inputTree, featLabels, testVec):
    """Classify testVec by walking the decision tree.

    inputTree is a nested dict {featureLabel: {value: subtree_or_leaf}};
    featLabels maps feature labels to positions in testVec.
    Returns the leaf class, or None when the test value matches no branch.
    """
    firstStr = next(iter(inputTree))
    secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr)
    classLabel = None  # fix: was unbound (UnboundLocalError) when no key matched
    for key, subtree in secondDict.items():
        if testVec[featIndex] == key:
            if isinstance(subtree, dict):
                classLabel = classify(subtree, featLabels, testVec)
            else:
                classLabel = subtree
    return classLabel


def load_wine_data():
    """Load the UCI Wine dataset from 'wine.data' in the current directory.

    Each non-empty line is comma-separated with the class id first.
    Returns (X, y): X is a list of 13-float feature lists, y is a list of
    one-element tuples holding the integer class id.
    """
    with open('wine.data', 'r') as fh:
        rows = [line.strip().split(',') for line in fh if line.strip()]

    features = [[float(v) for v in row[1:]] for row in rows]
    labels = [(int(row[0]),) for row in rows]

    return features, labels

# Load the Wine dataset at import time (features wine_X, labels wine_y).
wine_X, wine_y = load_wine_data()

# Print the first five samples' features and labels as a sanity check.
print("前5个样本的特征：", wine_X[:5])
print("前5个样本的标签：", wine_y[:5])

def ID3(dataSet, labels, featLabels):
    """Build a decision tree with the ID3 algorithm.

    Each sample stores its class at index 0. ``labels`` holds the feature
    labels and is mutated (the chosen label is deleted); ``featLabels``
    records the order in which features are chosen. Returns either a leaf
    class value or a nested dict {featureLabel: {value: subtree}}.
    """
    classList = [example[0] for example in dataSet]  # class is stored first in each sample
    # All samples share one class: return it as a leaf.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # No feature columns left: majority vote over the classes.
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)

    # Pick the feature with the highest information gain.
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    featLabels.append(bestFeatLabel)

    # Initialize this node of the tree.
    myTree = {bestFeatLabel: {}}
    del labels[bestFeat]  # NOTE: mutates the caller's list (original behavior kept)

    # Distinct values of the chosen feature. Fix: set(featValues) raised
    # TypeError on list-valued features, and a list cannot be a dict key.
    # Lists are hashed via a tuple key (used as the branch key), while the
    # ORIGINAL value is kept so splitDataSet's == comparison still matches.
    branches = {}
    for example in dataSet:
        value = example[bestFeat]
        key = tuple(value) if isinstance(value, list) else value
        if key not in branches:
            branches[key] = value

    # Recursively build the subtrees; each branch gets its own copy of
    # labels so a callee's deletion doesn't affect sibling branches.
    for key, value in branches.items():
        subLabels = labels[:]
        myTree[bestFeatLabel][key] = ID3(splitDataSet(dataSet, bestFeat, value), subLabels, featLabels)

    return myTree


if __name__ == '__main__':
    # Build a decision tree from the Wine dataset.
    # NOTE(review): 'Class' is listed first because each training sample is
    # (label_tuple, feature_list) from zip(wine_y, wine_X), so column 0 of a
    # sample is the class itself — confirm this layout is intended.
    wine_labels = ['Class', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols',
                   'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity', 'Hue',
                   'OD280/OD315 of diluted wines', 'Proline']

    wine_feat_labels = []
    wine_tree = ID3(list(zip(wine_y, wine_X)), wine_labels, wine_feat_labels)

    # Print the finished decision tree.
    print("构建的决策树：", wine_tree)



