import sys
import csv
import codecs
import numpy as np
import matplotlib.pyplot as plt

from utils import *
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier

# Load the train/validation split provided by the project utilities.
trainData, trainLabels, validData, validLabels = getTrainValid()

# Number of principal components to keep (chosen empirically).
N_COMPONENTS = 25

pca = PCA(n_components=N_COMPONENTS)
pca.fit(trainData)

# Optional diagnostic: bar chart of the explained variance per component.
# Disabled by default; flip PLOT_VARIANCE to True to inspect it.
# NOTE(review): the original (commented-out) version paired range(30) x
# positions with only 25 variance values, which would raise a length
# mismatch — use len(variances) instead.
PLOT_VARIANCE = False
if PLOT_VARIANCE:
    variances = pca.explained_variance_
    plt.bar(range(len(variances)), variances, label='Explained Variance')
    plt.legend()
    plt.yticks(np.linspace(0, 10, 20))
    plt.xlabel('Variance Index')
    plt.ylabel('Variance Value')
    plt.title('Variance Show')
    plt.show()

# Project both splits into the reduced PCA space.
trainData = pca.transform(trainData)
validData = pca.transform(validData)

# Model selection: sweep k for KNN on the PCA-reduced validation split,
# logging "k:accuracy" per line and finally "bestK:bestAccuracy".
K_RANGE = range(5, 16)

# Start below any reachable accuracy so the first k always becomes the
# incumbent — the original init of 0 could leave bestK at -1 (and crash
# the final KNeighborsClassifier) if every accuracy were exactly 0.
maxAcc = -1.0
bestK = -1
accList = []

# Context manager guarantees the log file is closed even if a fit fails
# (the original codecs.open handle leaked on any exception).
with open('./m2_valid.txt', 'w', encoding='UTF-8') as m2valid:
    for k in K_RANGE:
        knn = KNeighborsClassifier(n_neighbors=k)
        knn.fit(trainData, trainLabels)
        predicted = knn.predict(validData)
        # Fraction of validation samples predicted correctly.
        accuracy = (np.array(predicted) == validLabels).sum() * 1.0 / len(validLabels)
        accStr = str(k) + ":" + str(accuracy)
        print(accStr)
        m2valid.write(accStr + '\n')
        accList.append(accuracy)
        if accuracy > maxAcc:
            bestK = k
            maxAcc = accuracy

    m2valid.write(str(bestK) + ':' + str(maxAcc))

# Visualize validation accuracy as a function of k.
plt.plot(list(K_RANGE), accList)
plt.show()

print(bestK, maxAcc)

# Refit the classifier on the full training split with the best k.
knn = KNeighborsClassifier(n_neighbors=bestK)
knn.fit(trainData, trainLabels)

# Predict one label per test file: cut the raw sequence into fixed-length,
# non-overlapping windows, classify each window in PCA space, then take a
# majority vote over the window predictions.
testFiles, testData = getOriginalTest()
testPredicted = []

for oneData in testData:
    # Windows of length trainDestLen starting at 0, trainDestLen, ...;
    # a trailing remainder shorter than trainDestLen is dropped, matching
    # the training segmentation.
    segments = [
        oneData[start:start + trainDestLen]
        for start in range(0, len(oneData) - trainDestLen + 1, trainDestLen)
    ]
    # NOTE(review): if len(oneData) < trainDestLen, segments is empty and
    # pca.transform will raise — assumed not to occur for this dataset.
    segments = pca.transform(segments)
    onePreResult = knn.predict(segments)
    # Majority vote; bincount assumes labels are non-negative integers —
    # TODO confirm against the label encoding in utils.
    testPredicted.append(np.argmax(np.bincount(onePreResult)))

# Persist the per-file predictions as "id,categories" rows in method2.csv.
with open('method2.csv', 'w', encoding='utf-8', newline='') as outFile:
    csvWriter = csv.writer(outFile)
    csvWriter.writerow(['id', 'categories'])
    # Pair each file id with its predicted category, in order.
    csvWriter.writerows(zip(testFiles, testPredicted))

