from numpy import * #科学计算包
import  operator # 运算符模块
from os import listdir;
def createDataSet():
    """Return a tiny toy dataset: four 2-D points and their class labels."""
    points = array([
        [1.0, 1.1],
        [1.0, 1.0],
        [0.0, 0.0],
        [0.0, 0.1],
    ])
    tags = ['A', 'A', 'B', 'B']
    return points, tags

"""
分类器函数
其中标签向量的元素数目和矩阵dataSet的行数相同
"""
def classify0(inX,dataSet,labels,k):
    """
    :param inX:用于分类的输入向量
    :param dataSet:输入的训练样本集
    :param labels:标签向量
    :param k:用于选择最近邻居的数目
    :return:
    """
    print(dataSet)
    """
    [[1.  1.1]
     [1.  1. ]
     [0.  0. ]
    [0.  0.1]]
    """
    dataSetSize = dataSet.shape[0] #获取数据的大小
    #print(dataSetSize) #4
    # 计算距离
    diffMat = tile(inX,(dataSetSize,1))-dataSet
    """
    [[0 0]
    [0 0]
    [0 0]
    [0 0]]
    """
    #print(tile(inX,(dataSetSize,1)))
    """
    [[-1.  -1.1]
    [-1.  -1. ]
    [ 0.   0. ]
    [ 0.  -0.1]]
    """
    #print(diffMat)
    sqDiffMat = diffMat **2
    """
    [[1.   1.21]
    [1.   1.  ]
    [0.   0.  ]
    [0.   0.01]]
    """
    #print(sqDiffMat)
    sqDistances=sqDiffMat.sum(axis=1)
    """
    [2.21 2.   0.   0.01]
    """
    #print(sqDistances)
    distances = sqDistances**0.5
    """
    [1.48660687 1.41421356 0.         0.1       ]
    """
    #print(distances)
    sortedDistIndicies = distances.argsort() #函数返回的是数组值从小到大的索引值
    """
    [2 3 1 0]
    """
    #print(sortedDistIndicies)
    classCount = {}
    # 选择距离最小的k个点
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel,0)+1
    # 排序
    sortedClassCount = sorted(classCount.items(),key=operator.itemgetter(1),reverse=True)
    return sortedClassCount[0][0]

def file2matrix(filename):
    """
    Parse a tab-separated dating-data file into a feature matrix and labels.

    Each line holds three numeric feature columns followed by an integer
    class label.

    :param filename: path to the tab-separated data file
    :return: (N x 3 float matrix of features, list of N int labels)
    """
    # Single open with a context manager: the original opened the file
    # twice and never closed either handle.
    with open(filename) as fr:
        lines = fr.readlines()
    returnMat = zeros((len(lines), 3))
    classLabelVector = []
    for index, line in enumerate(lines):
        listFromLine = line.strip().split('\t')
        # numpy coerces the string fields to float on assignment.
        returnMat[index, :] = listFromLine[0:3]
        classLabelVector.append(int(listFromLine[-1]))  # last field is the label
    return returnMat, classLabelVector


def autoNorm(dataSet):
    """
    Scale every feature column of dataSet linearly into [0, 1].

    newValue = (oldValue - columnMin) / (columnMax - columnMin)

    :param dataSet: 2-D numeric array, one row per sample
    :return: (normalized array, per-column ranges, per-column minimums)
    """
    minVals = dataSet.min(0)   # column-wise minimum
    maxVals = dataSet.max(0)   # column-wise maximum
    ranges = maxVals - minVals
    m = dataSet.shape[0]
    # The original pre-allocated a zeros array that was immediately
    # overwritten; compute the result directly instead.
    # tile broadcasts the per-column min/range across all m rows.
    normDataSet = (dataSet - tile(minVals, (m, 1))) / tile(ranges, (m, 1))
    return normDataSet, ranges, minVals

"""
首先使用了file2matrix和autoNorm函数从文件中读取数据并将其转换为归一化特征值
接着计算测试向量的数量，此步决定了normMat向量中哪些数据用于测试，哪些数据用于分类器的训练样本；
"""
def datingClassTest():
    """
    测试
    :return:
    """
    hoRatio = 0.50
    datingDataMat, datingLabels = file2matrix(
        "D:/slpworkspace/github/artificial-intelligence/sourcecode/machinelearninginaction/Ch02/datingTestSet2.txt")
    normMat ,ranges,minVals = autoNorm(datingDataMat)
    m = normMat.shape[0] #矩阵的行数
    #print(m)
    numTestVecs = int(m*hoRatio)
    print(numTestVecs) # 500 int()是转换为证书
    errorCount = 0.0
    for i in range(numTestVecs):
        classifierResult = classify0(normMat[i,:],normMat[numTestVecs:m,:],datingLabels[numTestVecs:m],3)
        print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i]))
        if (classifierResult != datingLabels[i]): errorCount += 1.0
    print("the total error rate is: %f" % (errorCount / float(numTestVecs)))
    print(errorCount)

def classifyPerson(
        filename="D:/slpworkspace/github/artificial-intelligence/sourcecode/machinelearninginaction/Ch02/datingTestSet2.txt"):
    """
    Interactively classify a person from three dating features.

    Prompts for the three feature values, normalizes them with the training
    data's ranges, and prints the predicted likability category.

    :param filename: path to the dating training data
                     (defaults to the original hard-coded location)
    :return: None (result is printed)
    """
    resultList = ['not at all', 'in small doses', 'in large doses']
    percentTags = float(input('percentage of time spent playing video games?'))
    ffMiles = float(input('frequent filer miles earned per year?'))
    iceCream = float(input('liters if icecream consumed per year?'))
    datingDataMat, datingLabels = file2matrix(filename)
    normMat, ranges, minVals = autoNorm(datingDataMat)
    inArr = array([ffMiles, percentTags, iceCream])
    # BUG FIX: the original passed inArr - minVals/ranges, which (by operator
    # precedence) divided only minVals by ranges instead of normalizing the
    # whole input vector the same way autoNorm normalizes the training data.
    classifierResult = classify0((inArr - minVals) / ranges, normMat, datingLabels, 3)
    # Labels are 1-based (1..3); shift to index the 0-based result list.
    print('you will probably like this person:' + resultList[classifierResult - 1])


def img2vector(filename):
    """
    Read a 32x32 text image of '0'/'1' characters into a 1x1024 row vector.

    :param filename: path to a digit file with 32 lines of 32 characters
    :return: numpy array of shape (1, 1024)
    """
    returnVect = zeros((1, 1024))  # one row, 32*32 columns
    # Context manager guarantees the file handle is closed
    # (the original never closed it).
    with open(filename) as fr:
        for i in range(32):          # one text line per image row
            lineStr = fr.readline()
            for j in range(32):      # one character per pixel
                returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect

def handwritingClassTest(path1, path2):
    """
    Train a kNN digit classifier from path1 and evaluate it on path2.

    File names are expected to look like "<digit>_<index>.txt"; the digit
    before the underscore is taken as the class label.

    :param path1: directory of training digit files
    :param path2: directory of test digit files
    :return: None (per-file predictions and the error rate are printed)
    """
    trainingFiles = listdir(path1)
    hwLabels = []
    trainingMat = zeros((len(trainingFiles), 1024))
    for row, name in enumerate(trainingFiles):
        # Label is the digit before '_' in "<digit>_<n>.txt".
        hwLabels.append(int(name.split('.')[0].split('_')[0]))
        trainingMat[row, :] = img2vector(path1 + "/%s" % name)
    testFiles = listdir(path2)
    errorCount = 0.0
    for name in testFiles:
        expected = int(name.split('.')[0].split('_')[0])
        vectorUnderTest = img2vector(path2 + '/%s' % name)
        guessed = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        print("the classifier came back with: %d, the real answer is: %d" % (guessed, expected))
        if guessed != expected:
            errorCount += 1.0
    print("\nthe total number of errors is: %d" % errorCount)
    print("\nthe total error rate is: %f" % (errorCount / float(len(testFiles))))

if __name__ == '__main__':
    # Run the handwritten-digit kNN demo against the local digit datasets.
    handwritingClassTest(
        "D:/slpworkspace/github/artificial-intelligence/sourcecode/machinelearninginaction/Ch02/digits/trainingDigits",
        "D:/slpworkspace/github/artificial-intelligence/sourcecode/machinelearninginaction/Ch02/digits/testDigits")