import math
import pandas as pd
import numpy as np
import csv

# Toy sample dataset: each row is [feature0, feature1, class_label].
# NOTE(review): unused by the script below — the module-level name `x`
# is rebound inside the evaluation loop near the end of the file.
x = [[0, 1, "no"], [0, 1, "no"], [1, 0, "no"], [1, 1, "yes"], [1, 1, "no"], [1, 1, "no"], [1, 1, "maybe"],
     [0, 1, "maybe"], [2, 0, "maybe"], [1, 1, "yes"], [0, 0, "no"], [1, 1, "no"], [1, 1, "maybe"], [1, 1, "maybe"],
     [1, 1, "maybe"]]


# x=[[0,1,"no"],[0,1,"no"],[1,0,"no"],[1,1,"yes"],[1,1,"yes"],]
def majorityCnt(clasList):
    """Return the most frequent class label in clasList.

    Ties are broken in favor of the label seen first, because dicts
    preserve insertion order and max() returns the first maximal key.
    """
    classCount = {}
    for label in clasList:
        classCount[label] = classCount.get(label, 0) + 1
    # Single pass over the counts instead of max() + a second key scan.
    return max(classCount, key=classCount.get)


def splitDataset(dataset, BestFeature, feature_i_j):
    """Select the rows whose column ``BestFeature`` equals ``feature_i_j``
    and return them with that column removed (a new list; input untouched).
    """
    return [
        row[:BestFeature] + row[BestFeature + 1:]
        for row in dataset
        if row[BestFeature] == feature_i_j
    ]


def computeEntropy(subData, feature_i_j):
    """Shannon entropy (base 2) of the class labels in ``subData``.

    subData     : list of rows; the last element of each row is the label.
    feature_i_j : unused; kept so existing call sites keep working.

    Returns 0 for an empty subset (matching the original behaviour,
    where the loop over zero classes left entropy at 0).
    """
    if not subData:
        return 0
    # One pass to count labels — the original built both a set of labels
    # and a count dict with identical keys.
    counts = {}
    for sample in subData:
        counts[sample[-1]] = counts.get(sample[-1], 0) + 1
    total = float(len(subData))
    entropy = 0.0
    for count in counts.values():
        prob = count / total
        entropy -= prob * math.log(prob, 2)
    return entropy


def chooseBestFeature(dataset):
    """Return the index of the feature whose split yields the lowest
    conditional entropy — equivalently, the highest information gain,
    since the base entropy is the same constant for every feature.

    dataset : list of rows; the last column is the class label.
    """
    n_samples = float(len(dataset))
    n_features = len(dataset[0]) - 1
    cond_entropies = []
    for feat in range(n_features):
        # BUG FIX: feature values were hard-coded to {0, 1}; derive them
        # from the actual column so non-binary features are handled
        # (the sample data at the top of the file even contains a 2).
        values = {example[feat] for example in dataset}
        entropy = 0.0
        for value in values:
            subset = [example for example in dataset if example[feat] == value]
            prob = len(subset) / n_samples
            entropy += prob * computeEntropy(subset, value)  # weighted average entropy
        cond_entropies.append(entropy)
    return cond_entropies.index(min(cond_entropies))


def createTree(dataset, labels0=None):
    """Recursively build an ID3 decision tree.

    dataset : list of rows, each row = feature values + class label (last).
    labels0 : feature-name list aligned with the dataset columns (it may
              carry a trailing class-label name); copied, never mutated.
              If None, column indices are used as feature names — the
              original crashed on the default via ``None[:]``.

    Returns a class label (leaf) or a nested dict of the form
    {feature_name: {feature_value: subtree_or_label}}.
    """
    # Work on a copy so the caller's label list is untouched.
    if labels0 is not None:
        labels = labels0[:]
    else:
        labels = list(range(len(dataset[0]) - 1))

    classList = [example[-1] for example in dataset]
    # All samples share one class -> pure leaf.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # Features exhausted -> majority-vote leaf.
    if len(dataset[0]) == 1:
        return majorityCnt(classList)

    # Computed once — the original called chooseBestFeature() a second
    # time just to pick the index to delete.
    best_index = chooseBestFeature(dataset)
    best_label = labels[best_index]
    del labels[best_index]  # the matching data column is removed by splitDataset

    tree = {best_label: {}}
    # Branch on every value the chosen feature takes in this subset.
    for value in {example[best_index] for example in dataset}:
        tree[best_label][value] = createTree(
            splitDataset(dataset, best_index, value), labels[:]
        )
    return tree


def _binarize_inplace(data, median):
    """Overwrite data in place: 0 where value <= column median, else 1."""
    for i in range(len(data)):
        for j in range(len(data[0])):
            data[i][j] = 0 if data[i][j] <= median[j] else 1
    return data


def dichotomy(data, median=None):
    """Binarize ``data`` column-wise around a per-column median, in place.

    If ``median`` is given, binarize against it and return ``data`` only
    (used to clean extra data with a previously computed median).
    If ``median`` is None, ``data`` must be a 2-D numpy array; the upper
    median of each column is computed and ``(data, median)`` is returned.
    Note the two branches deliberately return different arities — callers
    depend on this.
    """
    # BUG FIX: the original tested `median != None`; on a numpy array that
    # is an elementwise comparison whose truth value raises ValueError.
    if median is not None:
        return _binarize_inplace(data, median)
    # Upper median: row n//2 of each independently sorted column
    # (np.sort returns a copy, so `data` is still the raw values here).
    sorted_cols = np.sort(data, axis=0)
    median = sorted_cols[data.shape[0] // 2, :]
    return _binarize_inplace(data, median), median


def classify(inputTree, featLabels, testVec):  # classifies one sample at a time
    """Walk the decision tree to classify a single sample.

    inputTree  : nested dict {feature_name: {feature_value: subtree_or_label}}.
    featLabels : feature-name list used to map a tree node's feature name
                 back to the sample's column index.
    testVec    : one sample's feature values, aligned with featLabels.

    Returns the predicted label, or None when the tree has no branch for
    the sample's feature value (same as the original's implicit None).
    """
    root = list(inputTree.keys())[0]
    branches = inputTree[root]
    feat_index = featLabels.index(root)  # label name -> column index
    value = testVec[feat_index]
    for key, subtree in branches.items():
        if value == key:
            if isinstance(subtree, dict):
                return classify(subtree, featLabels, testVec)
            return subtree
    return None


def cleanData(path='winequality-red.csv'):
    """Load the semicolon-separated wine-quality CSV and binarize it.

    Returns (Data, media): Data is [header_row] + rows whose feature
    columns are mapped to 0/1 around each column's median (the quality
    column is kept as-is); media is the per-column median used.
    """
    Data = []
    # The file is ';'-separated, so csv.reader yields each line as a
    # single field that we split ourselves. `with` closes the handle
    # (the original leaked it).
    with open(path, 'r') as fh:
        for row_i, row in enumerate(csv.reader(fh)):
            fields = row[0].split(";")
            if row_i != 0:
                # SECURITY FIX: was eval() on file contents; float()
                # parses the numeric fields without executing anything.
                fields = [float(v) for v in fields]
            Data.append(fields)

    numbers_data = np.array(Data[1:])
    # Binarize the feature columns, then re-attach the label column.
    bio_data, media = dichotomy(numbers_data[:, :-1])
    numbers_data = np.append(bio_data, numbers_data[:, -1:], axis=1)
    Data = [Data[0]] + numbers_data.tolist()

    return Data, media


Data, media = cleanData()  # clean the data, then split into train and test
# NOTE(review): with len(Data) == 1600 these slices are Data[1:1599] and
# Data[1:] — the "test" set contains (almost all of) the training rows,
# so the printed score is effectively training accuracy. Confirm intent.
Train_data, Test_data = Data[1:int(len(Data) * 1599 / 1600)], Data[int(len(Data) * 1 / 1600):]

# Data[0] is the header row, reused as the feature-label list.
Mytree = createTree(Train_data, Data[0])

print(Mytree)
correct_n = 0
for Test_vec in Test_data:
    # classify() yields None when the tree lacks a branch for a value.
    x = classify(Mytree, Data[0], Test_vec)
    if x == Test_vec[-1]:
        correct_n += 1
    print(x, Test_vec[-1])
# NOTE(review): the denominator 5/6 * len(Data) does not equal
# len(Test_data), so this ratio is not the usual accuracy — verify.
print(correct_n, correct_n / (5 / 6 * len(Data)))

# print(createTree(x))