# 1,收集数据
# 2,准备数据
# 3,分析数据
# 4,训练算法
# 5,测试算法
# 6,使用算法

import numpy as np
import operator
import matplotlib
import matplotlib.pyplot as plt


# 测试数据集1
# Toy dataset 1: seven hand-written 2-D points with their class labels.
def createData():
    """Return a small in-memory sample matrix and its label list."""
    points = [[3, 2], [3, 1], [4, 4], [5, 3], [6, 2], [7, 1], [8, 2]]
    data = np.array(points)
    lables = ['A', 'A', 'B', 'A', 'A', 'A', 'A']
    return data, lables


# 从txt中读取数据集2
# Load dataset 2 from a whitespace-separated text file: each line holds
# three numeric features followed by a class label.
def createData2(filename):
    """Parse *filename* into an (n, 3) float feature matrix and a label list.

    Bug fix: the original ignored the *filename* argument and opened a
    hard-coded path, and it never closed the file handle.
    """
    with open(filename) as fr:
        arrayLines = fr.readlines()
    MyData = np.zeros((len(arrayLines), 3))
    classlable = []
    for index, line in enumerate(arrayLines):
        lineArr = line.strip().split()
        MyData[index] = lineArr[0:3]  # numpy coerces the strings to float
        classlable.append(lineArr[-1])
    return MyData, classlable


# 画出未处理数据的分布(怎么画成彩色)
# Scatter-plot the raw (unprocessed) samples.
# NOTE(review): axis labels suggest columns are game-time % and ice cream
# liters, but only the caller's data file can confirm — verify.
def draw(MyData):
    """Show a scatter plot of columns 0 and 1 of *MyData*."""
    figure = plt.figure()
    axes = figure.add_subplot(111)
    axes.scatter(MyData[:, 0], MyData[:, 1])
    plt.xlabel('Percentage of Time Spent Playing Video Games')
    plt.ylabel('Liters of Ice Cream Consumed Per Week')
    plt.show()


from numpy import *


# 将数据处理为0~1区间，方式数据过大对权重的影响
# Min-max scale every feature column into [0, 1] so large-valued features
# do not dominate the distance computation in KNN.
def handleData(MyData):
    """Return *MyData* normalized column-wise to the [0, 1] range.

    Vectorized: numpy reductions along axis 0 plus broadcasting replace
    the original per-column Python loop and the two np.tile expansions.
    Result is numerically identical.
    """
    col_min = MyData.min(axis=0)
    col_range = MyData.max(axis=0) - col_min
    # Broadcasting applies the per-column shift and scale to every row.
    return (MyData - col_min) / col_range


from sklearn.model_selection import train_test_split


# 把数据区分成测试集和训练集
# Split the data into training and test subsets (70% train / 30% test).
def getTrainAndTest(MyResult, classlable):
    """Return X_train, X_test, y_train, y_test using a fixed random seed."""
    split = train_test_split(MyResult, classlable, test_size=0.3, random_state=24)
    return split[0], split[1], split[2], split[3]


# 计算误差
# Evaluate the KNN classifier on a held-out test set and print the error rate.
def sumError(X_train, X_test, y_train, y_test):
    """Classify every test sample with KNN (k=4) and print the error rate.

    Replaces the wildcard-imported ``shape(X_test)[0]`` index loop with
    ``len`` and ``zip``; the printed message is unchanged.
    """
    n_test = len(X_test)
    count = 0
    for sample, truth in zip(X_test, y_test):
        if KNN(sample, X_train, y_train, 4) != truth:
            count += 1
    print("错误率为：%f" % (count / n_test))


# KNN算法(分类)，最简单有效的算法
# 缺点：必须储存数据，使用大量空间
#      对数据中的每个数据计算距离，计算量大
# K-nearest-neighbors classifier: simple and effective, but it must keep the
# entire training set in memory and compute a distance to every sample.
def KNN(predict, data, lables, k):
    """Return the majority label among the k training points nearest *predict*.

    predict : 1-D feature vector to classify.
    data    : (m, n) training feature matrix.
    lables  : sequence of m class labels aligned with *data*.
    k       : number of neighbours that vote.

    Improvements over the original: broadcasting replaces np.tile, and the
    redundant sqrt is dropped — squared distance ranks neighbours
    identically because sqrt is monotonic.
    """
    diff = data - predict                  # broadcast predict across all rows
    distance_2 = np.sum(diff * diff, axis=1)
    nearest = np.argsort(distance_2)[:k]   # indices of the k closest samples
    votes = {}
    for idx in nearest:
        lable = lables[idx]
        votes[lable] = votes.get(lable, 0) + 1
    # Stable sort keeps the original tie-breaking (first-seen label wins).
    best = sorted(votes.items(), key=operator.itemgetter(1), reverse=True)
    return best[0][0]


# Driver: load the dating data and compare the KNN error rate on the
# normalized features against the raw features.  The __main__ guard keeps
# the file I/O and plotting from running if this module is ever imported.
if __name__ == '__main__':
    MyData, classlable = createData2('C:/code/datingTestSet2.txt')
    MyResult = handleData(MyData)
    draw(MyData)
    # Train/test split of the normalized data
    X_train, X_test, y_train, y_test = getTrainAndTest(MyResult, classlable)
    # Train/test split of the raw data
    X_train1, X_test1, y_train1, y_test1 = getTrainAndTest(MyData, classlable)
    # Error rate on the normalized features
    sumError(X_train, X_test, y_train, y_test)
    # Error rate on the raw features
    sumError(X_train1, X_test1, y_train1, y_test1)
