# -*- coding: utf-8 -*-
'''
Created on Sep 16, 2010
kNN: k Nearest Neighbors

Input:      inX: vector to compare to existing dataset (1xN)
            dataSet: size m data set of known vectors (NxM)
            labels: data set labels (1xM vector)
            k: number of neighbors to use for comparison (should be an odd number)

Output:     the most popular class label

@author: pbharrin & songting
'''
from numpy import *
import operator
from os import listdir


def classify0(inX, dataSet, labels, k):
    """
    Classify an input vector with the k-Nearest-Neighbors algorithm.

    :param inX: input vector to classify (1xN)
    :param dataSet: training feature matrix, one sample per row (MxN)
    :param labels: training labels, one per row of dataSet
    :param k: number of nearest neighbors that vote (use an odd number)
    :return: the majority label among the k nearest neighbors
    """
    # Number of training samples.
    dataSetSize = dataSet.shape[0]
    # Euclidean distance from inX to every training row:
    # replicate inX to the data set's shape, subtract, square, sum, sqrt.
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    distances = ((diffMat ** 2).sum(axis=1)) ** 0.5
    # Row indices sorted by ascending distance.
    sortedDistIndicies = distances.argsort()
    # Vote count per label among the k closest rows.
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # BUG FIX: iteritems() is Python-2-only (removed in Python 3);
    # items() behaves the same here and works on both versions.
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    # The label with the most votes wins.
    return sortedClassCount[0][0]


def createDataSet():
    """Return a tiny toy data set: four 2-D points and their class labels."""
    features = array([[1.0, 1.1],
                      [1.0, 1.0],
                      [0, 0],
                      [0, 0.1]])
    classes = ['A', 'A', 'B', 'B']
    return features, classes


def file2matrix(filename):
    """
    Parse a tab-separated data file into a feature matrix and a label list.

    Each line must hold at least 4 tab-separated fields: the first three
    are numeric features, the last one is an integer class label.

    :param filename: path to the data file
    :return: (numberOfLines x 3 feature matrix, list of int labels)
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename) as fr:
        lines = fr.readlines()
    # Feature matrix: one row per line, three feature columns.
    returnMat = zeros((len(lines), 3))
    classLabelVector = []
    for index, line in enumerate(lines):
        # Strip surrounding whitespace, then split on tabs.
        listFromLine = line.strip().split('\t')
        # First three columns are the features.
        returnMat[index, :] = listFromLine[0:3]
        # Last column is the integer class label.
        classLabelVector.append(int(listFromLine[-1]))
    return returnMat, classLabelVector


def file2matrix2(filename, featuresNum):
    """
    Parse a tab-separated data file into a feature matrix and a label list,
    with a configurable number of feature columns.

    Each line must hold at least featuresNum + 1 tab-separated fields: the
    first featuresNum are numeric features, the last one is an int label.

    :param filename: path to the data file
    :param featuresNum: number of leading feature columns per line
    :return: (numberOfLines x featuresNum feature matrix, list of int labels)
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename) as fr:
        lines = fr.readlines()
    # Feature matrix: one row per line, featuresNum columns.
    returnMat = zeros((len(lines), featuresNum))
    classLabelVector = []
    for index, line in enumerate(lines):
        listFromLine = line.strip().split('\t')
        # BUG FIX: the slice was hard-coded to [0:3] even though the matrix
        # has featuresNum columns, breaking any featuresNum != 3.
        returnMat[index, :] = listFromLine[0:featuresNum]
        # Last column is the integer class label.
        classLabelVector.append(int(listFromLine[-1]))
    return returnMat, classLabelVector


def autoNorm(dataSet):
    """
    Min-max normalize each feature column into the [0, 1] range:
        newValue = (oldValue - min) / (max - min)

    :param dataSet: feature matrix, one sample per row
    :return: (normalized matrix, per-column ranges, per-column minimums)
    """
    # Column-wise minimum and maximum (axis 0).
    minVals = dataSet.min(0)
    maxVals = dataSet.max(0)
    ranges = maxVals - minVals
    m = dataSet.shape[0]
    # Shift each column to zero, then scale by its range.
    # (The original pre-allocated a zeros() buffer here that was immediately
    # overwritten — dead code, removed.)
    normDataSet = dataSet - tile(minVals, (m, 1))
    normDataSet = normDataSet / tile(ranges, (m, 1))  # element-wise divide
    return normDataSet, ranges, minVals


def datingClassTest():
    """
    Evaluate classify0 on the dating data set in 'datingTestSet2.txt'.

    The first hoRatio fraction of (normalized) rows is used as the test set
    and the remainder as the training set; prints each prediction and the
    overall error rate.
    """
    hoRatio = 0.50  # FIX: comment said 10% but the ratio held out is 50%
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')  # load data set from file
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m * hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        # Train on the held-in tail rows, test on row i, 3 neighbors.
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :],
                                     datingLabels[numTestVecs:m], 3)
        # Single-argument print(...) is valid on both Python 2 and 3;
        # the original bare print statements were Python-2-only.
        print("the classifier came back with: %d, the real answer is: %d"
              % (classifierResult, datingLabels[i]))
        if classifierResult != datingLabels[i]:
            errorCount += 1.0
    print("the total error rate is: %f" % (errorCount / float(numTestVecs)))
    print(errorCount)


def img2vector(filename):
    """
    Read a 32x32 text image of digit characters and flatten it into a
    1x1024 numpy row vector.

    :param filename: path to a file with at least 32 lines of 32 digits each
    :return: 1x1024 vector holding the digits row by row
    """
    returnVect = zeros((1, 1024))
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename) as fr:
        # Flatten the 32x32 grid: row i, column j -> index 32*i + j.
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect


def handwritingClassTest():
    """
    Train 3-NN on every image in trainingDigits/ and classify every image
    in testDigits/, printing per-file predictions and the error rate.

    File names are expected to look like '<digit>_<sample>.txt'; the digit
    before the underscore is the class label.
    """
    hwLabels = []
    # Build the training matrix: one 1x1024 image vector per file.
    trainingFileList = listdir('trainingDigits')
    mTrain = len(trainingFileList)
    trainingMat = zeros((mTrain, 1024))
    for i in range(mTrain):
        fileNameStr = trainingFileList[i]
        # '<digit>_<sample>.txt' -> integer class label.
        classNumStr = int(fileNameStr.split('.')[0].split('_')[0])
        hwLabels.append(classNumStr)
        trainingMat[i, :] = img2vector('trainingDigits/%s' % fileNameStr)

    # Classify every test image against the training matrix.
    testFileList = listdir('testDigits')
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        classNumStr = int(fileNameStr.split('.')[0].split('_')[0])
        vectorUnderTest = img2vector('testDigits/%s' % fileNameStr)
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        # Single-argument print(...) is valid on both Python 2 and 3;
        # the original bare print statements were Python-2-only.
        print("the classifier came back with: %d, the real answer is: %d"
              % (classifierResult, classNumStr))
        if classifierResult != classNumStr:
            errorCount += 1.0
    print("\nthe total number of errors is: %d" % errorCount)
    print("\nthe total error rate is: %f" % (errorCount / float(mTest)))
