# -*- coding: utf-8 -*-
# au: weiran
# 20171216

import numpy as np
import pandas as pda
import operator
import time
from sklearn import preprocessing
from sklearn.model_selection import train_test_split

# Basic naive-Bayes classifier class providing training and testing.
class Bayes:
    """Naive-Bayes classifier for discrete feature values.

    bytrain() estimates label priors and groups the training vectors by
    label; bytest() scores one sample against each label using exact-match
    frequencies of every feature value, and returns the best label.
    """

    def __init__(self):
        self.length = -1            # feature-vector length; -1 means "not trained yet"
        self.labelcount = dict()    # label -> prior probability, e.g. {"a": 0.4, "b": 0.6}
        self.vectorcount = dict()   # label -> list of training feature vectors
        self.norlabels = []         # distinct labels seen during training

    def bytrain(self, dataSet: list, labels: list):
        """Fit the classifier.

        dataSet: sequence of equal-length feature vectors.
        labels:  sequence of labels aligned with dataSet.
        Returns self so calls can be chained.
        Raises ValueError when the inputs are empty or of mismatched length.
        """
        if len(dataSet) != len(labels):
            raise ValueError("len(dataSet) != len(labels)")
        if len(dataSet) == 0:
            # Guard: the original crashed with IndexError on dataSet[0].
            raise ValueError("empty training set")
        self.length = len(dataSet[0])   # number of features per sample
        labelsnum = len(labels)         # total number of training samples
        self.norlabels = set(labels)    # distinct label values
        # Prior p(c): relative frequency of each label in the training set.
        for label in self.norlabels:
            self.labelcount[label] = labels.count(label) / labelsnum

        # Group every training vector under its label for later lookup.
        for vector, label in zip(dataSet, labels):
            self.vectorcount.setdefault(label, []).append(vector)
        print("Train Finish")
        return self

    def bytest(self, TestData):
        """Return the most probable label for one feature vector.

        Naive independence assumption: p(c|x) is proportional to
        p(c) * product of p(x_i|c), where p(x_i|c) is the relative
        frequency of the exact value x_i in feature i of class c.
        Raises ValueError when called before bytrain().
        """
        if self.length == -1:
            raise ValueError("No training!")
        lbDict = dict()  # label -> unnormalized posterior score
        for thislb in self.norlabels:
            p = 1
            prior = self.labelcount[thislb]  # p(c)
            classvectors = self.vectorcount[thislb]
            vnum = len(classvectors)         # number of samples in this class
            # Transpose to features-by-samples so each row is one feature column.
            classvectors = np.array(classvectors).T

            for index in range(0, len(TestData)):
                column = list(classvectors[index])
                # p(x_i|c): exact-match frequency of this feature value.
                # e.g. TestData[i] = 3, column = [1, 2, 3, 3, 1, 2, 1] -> 2/7
                p *= column.count(TestData[index]) / vnum
            lbDict[thislb] = p * prior
        # Argmax over labels (first-inserted label wins ties, as before).
        return max(lbDict, key=lbDict.get)

# Training-set loader for the Bayes classifier (reads raw values from the csv only).
def initData2(pCsv):
    """Read pCsv and return (features, targets) for the Bayes classifier.

    features: ndarray of the first four columns (e.g. 5.1 3.5 1.4 0.2).
    targets:  ndarray of the fifth column (the class label, e.g. setosa).
    No scaling or normalization is applied here.
    """
    pdData = pda.read_csv(pCsv)
    # DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
    # supported replacement and returns the same ndarray.
    mFeatures = pdData.iloc[:, 0:4].to_numpy()
    mTarget = pdData.iloc[:, 4].to_numpy()

    return mFeatures, mTarget


def knn(k, testItem, traindata, labels):
    """Classify testItem by majority vote among its k nearest neighbors.

    k:         number of neighbors to poll.
    testItem:  feature vector to classify.
    traindata: 2-D array of training feature vectors (one per row).
    labels:    labels aligned with the rows of traindata.
    Returns the winning label.
    """
    # Euclidean distance from testItem to every training row; broadcasting
    # subtracts testItem from each row.
    deltas = traindata - testItem
    distances = (deltas ** 2).sum(axis=1) ** 0.5
    # Row indices of the k closest training samples.
    nearest = distances.argsort()[:k]
    votes = {}
    for idx in nearest:
        winner = labels[idx]
        votes[winner] = votes.get(winner, 0) + 1
    # First-inserted label wins ties, matching a stable descending sort.
    return max(votes, key=votes.get)

# Training-set loader for the KNN classifier (applies normalization and standardization).
def initData(pCsv):
    """Read pCsv and return (preprocessed features, targets) for KNN.

    Features are the first four columns, L2-normalized per row and then
    standardized per column (zero mean, unit variance). Targets are the
    fifth column (the class label).
    """
    pdData = pda.read_csv(pCsv)
    # DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
    # supported replacement and returns the same ndarray.
    mFeatures = pdData.iloc[:, 0:4].to_numpy()
    mTarget = pdData.iloc[:, 4].to_numpy()
    norMFeatures = preprocessing.normalize(mFeatures)  # row-wise L2 normalization
    # NOTE: the original also computed preprocessing.scale(mFeatures) into an
    # unused local; removed.
    nsMFeatures = preprocessing.scale(norMFeatures)    # column-wise standardization

    return nsMFeatures, mTarget

def testKNN(pCsv):
    """Train/evaluate the KNN classifier on a 90/10 split of the csv data.

    Prints each prediction next to its ground truth, then the elapsed time,
    the test-set size, the error count, and the accuracy percentage.
    """
    # Training phase: load the preprocessed data and split it.
    features, targets = initData(pCsv)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.1, random_state=1)

    # Testing phase: classify every held-out sample with k = 3.
    start = time.time()
    total = len(x_test)
    errors = 0
    for sample, truth in zip(x_test, y_test):
        predicted = knn(3, sample, x_train, y_train)
        print(f"预测值: {predicted} -  真实值: {truth}")
        if str(predicted) != str(truth):
            print("[Wrong rst!] ")
            errors += 1
    elapsed = time.time() - start
    print(f"Time: {elapsed} sec.")
    print(f"{total} {errors}")
    print(f"Accuracy: {round(((total - errors) / total) * 100, 2)}%")

def testBayes(pCsv):
    """Train/evaluate the naive-Bayes classifier on a 90/10 split of the csv.

    Prints each prediction next to its ground truth, then the elapsed time,
    the test-set size, the error count, and the accuracy percentage.
    NOTE: the original body mixed tab and space indentation; normalized to
    4-space indents per PEP 8.
    """
    # Training phase.
    mFeatures, mTarget = initData2(pCsv)
    feature_train, feature_test, label_train, label_test = train_test_split(
        mFeatures, mTarget, test_size=0.1, random_state=1)
    timeStart = time.time()
    by = Bayes()
    # Bayes.bytrain counts labels with list.count, so convert the ndarray.
    by.bytrain(feature_train, label_train.tolist())

    # Testing phase.
    allTestItems = len(feature_test)
    wrongRst = 0
    for i in range(0, len(feature_test)):
        label = by.bytest(feature_test[i])
        print("预测值: " + str(label) + ' - ' + " 真实值: " + str(label_test[i]))
        if (str(label) != str(label_test[i])):
            print("[Wrong rst!] ")
            wrongRst += 1
    timeEnd = time.time()
    print("Time: " + str(timeEnd - timeStart) + " sec.")
    print(str(allTestItems) + ' ' + str(wrongRst))
    print("Accuracy: " + str(round(((allTestItems - wrongRst)/allTestItems) * 100, 2)) + '%')


# Script entry point: run both classifiers against the same iris csv file.
if __name__ == "__main__":
    iris_csv = "D:/programming_python/week7_hw/iris_w.csv"
    testKNN(iris_csv)
    testBayes(iris_csv)