import random
import numpy as np
import pandas as pd


def DataPro(path, newpath="data.xlsx"):
    """Clean the raw ancient-tree CSV and save it as an Excel file.

    Steps:
      1. Read the GBK-encoded CSV and drop the survey-ID columns if present.
      2. Drop every row that contains a blank-cell marker (a single space).
      3. Truncate column 1 to its first two characters.
      4. For column 4 values written as a range like "10*12", replace the
         cell with the midpoint of the two numbers.

    :param path: path of the source CSV file (GBK encoded).
    :param newpath: path of the Excel file to write.  Defaults to
        "data.xlsx", matching the module-global value set in ``__main__``,
        so existing ``DataPro(path)`` calls behave as before.
    :return: the cleaned DataFrame (also written to ``newpath``).
    """
    data = pd.read_csv(path, encoding="GBK", engine='python')
    colname = list(data.columns)
    if '调查编号' in colname:
        data.drop(['调查编号'], axis=1, inplace=True)
    if '编号' in colname:
        data.drop(['编号'], axis=1, inplace=True)
    df = pd.DataFrame(data)
    # Rows holding a bare-space cell are treated as missing and dropped
    # wholesale.  The fresh RangeIndex from read_csv makes positional row
    # numbers and index labels coincide, so drop(labels=...) is safe here.
    nullist = []
    for i in range(df.shape[0]):
        for j in range(df.shape[1]):
            if df.iat[i, j] == ' ':
                nullist.append(i)
    nullist = list(set(nullist))
    df.drop(labels=nullist, inplace=True)
    for i in range(df.shape[0]):
        # Keep only the first two characters of column 1 — presumably a
        # category prefix; TODO confirm against the source data.
        df.iat[i, 1] = df.iat[i, 1][0:2]
        string = str(df.iat[i, 4])
        if len(string) >= 3:
            star = string.find('*')
            if star != -1:
                # Bug fix: the original read only ONE character after '*'
                # (string[j + 1]), so "10*12" became (10 + 1) / 2 instead of
                # (10 + 12) / 2.  float() also replaces the unsafe eval().
                df.iat[i, 4] = (float(string[:star]) + float(string[star + 1:])) / 2
    df.to_excel(newpath)
    return df


def creatWord(doclist):
    """Build the vocabulary for the corpus.

    :param doclist: iterable of documents, each an iterable of tokens.
    :return: list of every distinct token seen across all documents
        (order is unspecified, as with the original set-union version).
    """
    vocabulary = set()
    for document in doclist:
        vocabulary.update(document)
    return list(vocabulary)


def setOfWord2Vec(vocalist, input):
    """Convert a document into a set-of-words presence vector.

    Performance fix: the original called ``vocalist.index(...)`` for every
    token, making the function O(len(input) * len(vocalist)); a one-time
    word->position map makes each lookup O(1).  ``setdefault`` keeps the
    FIRST occurrence of a duplicated word, matching ``list.index``.

    NOTE: the parameter name ``input`` shadows the builtin; it is kept to
    preserve the keyword-argument interface for existing callers.

    :param vocalist: vocabulary list defining the vector's positions.
    :param input: iterable of tokens from one document.
    :return: list of 0/1 flags, e.g. [0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1].
    """
    position = {}
    for k, word in enumerate(vocalist):
        position.setdefault(word, k)
    returnVec = [0] * len(vocalist)
    for token in input:
        k = position.get(token)
        if k is not None:
            returnVec[k] = 1
    return returnVec


def Train(trainMat, trainClass):
    """Estimate naive-Bayes parameters for class labels 1..5.

    :param trainMat: 2-D numpy array, one set-of-words row per sample.
    :param trainClass: 1-D numpy array of integer labels in 1..5.
    :return: (pVec, p_class) where pVec[c] is the per-word log-likelihood
        vector for class c and p_class[c] is that class's prior; index 0
        and indices 6..9 are unused placeholders.
    """
    sample_count = len(trainMat)
    word_count = len(trainMat[0])  # vocabulary size
    p_class = [0] * 10  # class priors, indexed by label (only 1..5 used)
    pnum = [0] * 10     # per-class word-count numerators
    pDenom = [0] * 10   # per-class total-count denominators
    print(len(p_class))
    for label in range(1, 6):
        p_class[label] = np.sum(trainClass == label) / float(sample_count)
        # Laplace smoothing: start every word count at 1.
        pnum[label] = np.ones(word_count)
        # NOTE(review): the matching denominator offset is the constant 5
        # (number of classes?) rather than the vocabulary size — kept as-is
        # to preserve behavior, but worth confirming statistically.
        pDenom[label] = 5
    for row in range(sample_count):
        label = trainClass[row]
        pnum[label] += trainMat[row]       # accumulate word occurrences
        pDenom[label] += sum(trainMat[row])  # accumulate total words
    pVec = [0] * 10
    for label in range(1, 6):
        pVec[label] = np.log(pnum[label] / pDenom[label])  # log-likelihoods
    return pVec, p_class


def Classify(wordVec, pVec, p_class):
    """Classify a word vector, returning (best log-posterior, class label).

    Bug fix: the original tracked ``max(-p[i])``.  Since log-posteriors are
    negative, that selects the class with the SMALLEST posterior — i.e. the
    LEAST likely class.  A naive-Bayes decision rule must take the argmax
    of the log-posterior, which is what this version does.  The returned
    score is now the winning log-posterior itself (negative), not its
    negation; callers in this file only print it.

    :param wordVec: numpy 0/1 presence vector for the document.
    :param pVec: per-class log-likelihood vectors (indices 1..5 used).
    :param p_class: per-class prior probabilities (indices 1..5 used).
    :return: (maxx, flag) — best log-posterior and the predicted label.
    """
    p = [0 for i in range(10)]
    maxx = -np.inf  # largest log-posterior seen so far
    flag = 0
    for i in range(1, 6):
        # log P(class) + sum of log-likelihoods of the present words.
        p[i] = np.log(p_class[i]) + sum(wordVec * pVec[i])
        print('验证的概率p', p[i])
        if p[i] > maxx:
            maxx = p[i]
            flag = i
    print('maxx=', maxx)
    return maxx, flag


def Bayes():
    """Train and evaluate a naive-Bayes classifier on the cleaned tree data.

    Reads the Excel file produced by DataPro() from the module-global
    ``newpath``, uses column 8 (growth status) as the class label and every
    other column's cell value as a categorical "word", splits the rows
    roughly 50/50 into train/test sets at random, then prints the
    classification accuracy.  All output goes to stdout via print().
    """
    # Map growth-status text to integer class labels 1..5.
    dic = {"死亡": 5, "濒危": 4, "较差": 3, "一般": 2, "旺盛": 1, }
    doclist = []    # one "document" (list of feature cell values) per row
    classlist = []  # integer class label per row, parallel to doclist
    # NOTE(review): depends on the module-global `newpath` set in __main__.
    dataexcel = pd.read_excel(newpath)
    df = pd.DataFrame(dataexcel)
    print(df.shape[0], '---', df.shape[1])  # expected shape: 357 rows x 9 cols
    for i in range(df.shape[0]):
        tmplist = []
        for j in range(df.shape[1]):
            # Column 8 is the label; every other column is a feature token.
            classlist.append(dic[df.iat[i, 8]]) if j == (8) else tmplist.append(df.iat[i, j])
        doclist.append(tmplist)
    print('总行数和分类列表', doclist, '\n', classlist)
    vocalist = creatWord(doclist)
    print('总词数', vocalist)
    # Randomly move half of the row indices from the train to the test set.
    trainSet = list(range(df.shape[0]))
    testSet = []
    for index in range(int(0.5 * len(trainSet))):
        randindex = int(random.uniform(0, len(trainSet)))
        testSet.append(trainSet[randindex])
        del trainSet[randindex]
    print('划分训练集和测试集', trainSet, '\n', testSet)
    trainMat = []
    trainClass = []
    for i in trainSet:  # build the set-of-words training vectors
        trainMat.append(setOfWord2Vec(vocalist, doclist[i]))
        trainClass.append(classlist[i])
    print('trainMat,trainClass:', np.array(trainMat).shape, trainClass)
    pVec, p_class = Train(np.array(trainMat), np.array(trainClass))  # lists -> numpy arrays
    # Evaluate on the held-out rows and count misclassifications.
    errorCount = 0
    for i in testSet:
        wordVec = setOfWord2Vec(vocalist, doclist[i])
        ansp, ans = Classify(np.array(wordVec), pVec, p_class)
        print('验证---概率、答案、真实分类结果', ansp, ans, classlist[i])
        if ans != classlist[i]:
            errorCount += 1
    print('当前测试样本{}错了{}个，准确率为{}%'.format(len(testSet), errorCount, 100 * (1 - (errorCount / len(testSet)))))


if __name__ == '__main__':
    # Source CSV of Qingdao ancient/famous trees (GBK-encoded).
    path = 'E:/3学习资料/机器学习/Homework_First/青岛市古树名木_0.csv'
    # Module-global output path: written by DataPro() and read by Bayes().
    newpath = "data.xlsx"
    DataPro(path)
    Bayes()
