import numpy as np
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from pylab import mpl
from sklearn import svm
def knn(inX, dataSet, labels, k):
    """Classify inX by majority vote among its k nearest neighbors.

    Args:
        inX: 1-D array-like query sample (same feature count as dataSet rows).
        dataSet: 2-D ndarray of training samples, one row per sample.
        labels: sequence of class labels aligned with dataSet rows.
        k: number of nearest neighbors that vote (k >= 1 for a real result).

    Returns:
        The label with the highest vote count among the k nearest rows.
        Ties break in favor of the label seen first among the neighbors;
        k == 0 returns 0 (matching the original implementation).
    """
    # Euclidean distance from inX to every training row (broadcast subtract).
    dist = np.sqrt(((dataSet - inX) ** 2).sum(axis=1))
    # Indices of the k closest training samples, nearest first.
    nearest = dist.argsort()[:k]
    # Tally votes per label.
    classCount = {}
    for idx in nearest:
        vote = labels[idx]
        classCount[vote] = classCount.get(vote, 0) + 1
    # max() returns the first key reaching the maximum count, which matches
    # the original first-seen tie-break; default=0 keeps the k == 0 behavior.
    return max(classCount, key=classCount.get, default=0)



# Configure matplotlib to render CJK (Chinese) text using the SimHei font.
mpl.rcParams["font.sans-serif"] = ["SimHei"]
# Keep the minus sign rendering correctly while a CJK font is active.
mpl.rcParams["axes.unicode_minus"] = False
# Print NumPy floats with one decimal place for readable console output.
np.set_printoptions(formatter={'float': '{: .1f}'.format})


# --- Load the iris dataset: 4 numeric features + species name per CSV line ---
# Species are encoded as integers for plotting/classification:
#   Iris-setosa -> 1, Iris-versicolor -> 2, Iris-virginica -> 3
LABEL_CODES = {'Iris-setosa': 1, 'Iris-versicolor': 2, 'Iris-virginica': 3}

features = []
classLabelVector = []
# Single pass with a context manager: the original opened the file twice
# (once just to count lines) and never used `with`. It also had a latent
# bug: a blank or unrecognized-label line filled the feature matrix but
# not the label list, silently desynchronizing the two (and a trailing
# blank line would crash on float('')). Malformed lines are skipped here.
with open('D:/python/shuixianhua.txt') as fr:
    for line in fr:
        listword = line.strip().split(',')
        if len(listword) < 5 or listword[4] not in LABEL_CODES:
            continue
        features.append([float(w) for w in listword[:4]])
        classLabelVector.append(LABEL_CODES[listword[4]])

# (n_samples, 4) float64 matrix, same layout as the original np.zeros fill.
dataset = np.array(features)
print(classLabelVector)

# Reduce the 4-D feature matrix to its two leading principal components.
X = dataset
pca = PCA(2)
pca.fit(X)
newX = pca.transform(X)
print(newX)

# Scatter the 2-D projection, coloring each point by its class label.
plt.scatter(newX[:, 0], newX[:, 1], c=classLabelVector)
plt.title('PCA降维结果')
plt.xlabel('主成分1')
plt.ylabel('主成分2')
plt.show()

# def autoNorm(dataSet):
#     minVals = dataSet.min(0)
#     maxVals = dataSet.max(0)
#     normDataSet = np.zeros(dataSet.shape)
#     normDataSet = (dataSet - minVals)/(maxVals - minVals)
#     return normDataSet
#
# dataSet=autoNorm(newX)
# print(dataSet)

# Hold out the last 20% of samples as the test set; the first 80% serve
# as the kNN reference set (no shuffling — relies on the file's row order).
m = 0.8
dataSize = newX.shape[0]
trainSize = int(m * dataSize)
testSize = dataSize - trainSize
print(trainSize, testSize)
k = 3
error = 0

# Hoist the invariant training slices out of the loop.
train_X = newX[0:trainSize, :]
train_y = classLabelVector[0:trainSize]
for i in range(testSize):
    print(f"开始处理测试样本 {i}")
    predicted = knn(newX[trainSize + i, :], train_X, train_y, k)
    if predicted != classLabelVector[trainSize + i]:
        error += 1
# Misclassification rate over the held-out samples.
print("error:", error / testSize)
# model=svm.SVC()
# model.fit(newX[0:trainSize,:],classLabelVector[0:trainSize])
# for i in range(testSize):
#     result=model.predict(newX[trainSize+i,:].reshape(1,-1))
#     if result!=classLabelVector[trainSize+i]:
#         error=error+1
# print("error:",error/testSize)