# coding:utf-8
# k-nearest-neighbour (KNN) algorithm test
from sklearn import decomposition
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import pandas as pd

# Data loading
def getData():
    """Load the flow model/tree/work-file CSVs and merge them into one DataFrame.

    Joins flow_model -> flow_tree on ``flow_mode_id`` and the result ->
    flow_work_file on ``flow_tree_id``. Missing values are filled with 0.

    Returns:
        pd.DataFrame: the merged frame with columns from all three sources.
    """
    model = pd.read_csv("G:/学习教程/人工智能/测试数据文件夹/flow_model.csv")
    tree = pd.read_csv("G:/学习教程/人工智能/测试数据文件夹/flow_tree.csv")
    file = pd.read_csv("G:/学习教程/人工智能/测试数据文件夹/flow_work_file.csv")

    # Keep only the columns used downstream; fill NaNs with 0.
    model2 = model.filter(items=['flow_mode_id', 'is_latest']).fillna(0)
    tree2 = tree.filter(items=['flow_tree_id', 'flow_mode_id', 'level', 'top_id']).fillna(0)
    file2 = file.filter(items=['id', 'flow_tree_id', 'create_time']).fillna(0)

    # FIX: `on` previously listed the same key twice (e.g.
    # ['flow_mode_id', 'flow_mode_id']); a single key expresses the join.
    model_tree = pd.merge(model2, tree2, on='flow_mode_id')
    model_tree2 = pd.merge(model_tree, file2, on='flow_tree_id')
    return model_tree2

def getDateTime():
    """Return the raw `create_time` column values from the merged dataset.

    NOTE: this reloads all three CSVs via getData() on every call.
    """
    merged = getData()
    return merged['create_time'].values

# Convert timestamp values into a datetime representation
def recovertDateTime(data):
    """Wrap *data* in a pandas DatetimeIndex so year/month/day can be read.

    (For epoch-second input, ``pd.to_datetime(data, unit='s')`` would be the
    alternative conversion.)
    """
    return pd.DatetimeIndex(data)


# k-nearest-neighbour algorithm implementation
def knn(X_train, X_test, y_train, y_test):
    """Fit a 5-nearest-neighbour classifier and evaluate it.

    Returns:
        tuple: (predicted labels for X_test, mean accuracy on the test set)
    """
    classifier = KNeighborsClassifier(n_neighbors=5)
    classifier.fit(X_train, y_train)
    # Predicted labels for the held-out samples.
    predictions = classifier.predict(X_test)
    # Mean accuracy on the held-out samples.
    accuracy = classifier.score(X_test, y_test)
    return predictions, accuracy

# KNN with cross-validation and grid search over hyper-parameters
def knnGridSearchCV(X_train, X_test, y_train, y_test):
    """Grid-search ``n_neighbors`` for KNN with 2-fold CV; print and return metrics.

    Returns:
        tuple: (test-set accuracy, best cross-validation score, best estimator).
        FIX: previously the metrics were printed and then discarded (the
        function returned None); returning them lets callers use the results
        programmatically while keeping the original output unchanged.
    """
    knn = KNeighborsClassifier()
    # Candidate hyper-parameter values to search over.
    param = {"n_neighbors": [1, 3, 5, 10]}

    # cv=2 -> split the training data into 2 folds for cross-validation;
    # param_grid holds the hyper-parameters explored during the search.
    gc = GridSearchCV(knn, param_grid=param, cv=2)
    gc.fit(X_train, y_train)

    # Accuracy of the refit best model on the held-out test set.
    test_score = gc.score(X_test, y_test)
    print("在测试集上准确率：", test_score)
    print("在交叉验证当中最好的结果：", gc.best_score_)
    print("选择最好的模型是：", gc.best_estimator_)
    print("每个超参数每次交叉验证的结果：", gc.cv_results_)

    return test_score, gc.best_score_, gc.best_estimator_


# Feature selection (variance-threshold experiment, kept for reference)
# def processorData(data):
#   #
#    var=feature_selection.VarianceThreshold(threshold=0.2)
#    return var.fit_transform(data)

# Dimensionality reduction
def proprocessData(data):
    """Project *data* onto enough principal components to keep 95% variance."""
    # n_components in (0, 1) means "retain this fraction of explained variance".
    reducer = decomposition.PCA(n_components=0.95)
    return reducer.fit_transform(data)

if __name__ == '__main__':

    # Load the merged dataset ONCE. (FIX: the original also called
    # getDateTime(), which re-read all three CSVs a second time.)
    full = getData()
    dictDate = recovertDateTime(full['create_time'].values)

    # Feature frame; .copy() avoids SettingWithCopyWarning when the
    # year/month/day columns are added below.
    data = full[['flow_mode_id', 'flow_tree_id']].copy()
    data['year'] = dictDate.year
    data['month'] = dictDate.month
    data['day'] = dictDate.day

    # Train/test split: flow_mode_id is the target label.
    y = data['flow_mode_id']
    x = data.drop(['flow_mode_id'], axis=1)
    X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.25)

    # BUG FIX: `X_train.fillna(X_train.mean)` passed the bound method (not
    # its result) and discarded the returned frame — fillna is not in-place,
    # so the original lines were silent no-ops. Fill with the TRAIN means
    # (also for the test set, to avoid leaking test-set statistics).
    train_means = X_train.mean()
    X_train = X_train.fillna(train_means)
    X_test = X_test.fillna(train_means)

    # Standardize features: fit the scaler on the training set only.
    scl = StandardScaler()
    X_train = scl.fit_transform(X_train)
    X_test = scl.transform(X_test)

    # PCA: keep enough components to explain 98% of the variance.
    pca = decomposition.PCA(n_components=0.98)
    X_train = pca.fit_transform(X_train)
    X_test = pca.transform(X_test)

    # k-nearest-neighbours with grid search + cross-validation.
    # result, score = knn(X_train, X_test, y_train, y_test)
    knnGridSearchCV(X_train, X_test, y_train, y_test)