import re
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import cross_val_score
import os
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
from data_process.data_process_adfa import process_adfa
from data_process.data_process_lidds import data_process
import random
from metric.metric_class import metric_oneclass
from sklearn.model_selection import RandomizedSearchCV,GridSearchCV
import codecs
import csv




def kmeans_model(train_vector, train_y, test_vector):
    """Grid-search a KNeighborsClassifier, predict the test set, and
    print/return the resulting accuracy.

    NOTE(review): despite the name, this fits a k-nearest-neighbours
    classifier, not k-means clustering. The name is kept for callers.

    :param train_vector: feature matrix of the training samples
    :param train_y: labels aligned with ``train_vector``
    :param test_vector: feature matrix of the test samples
    :return: ``(y_pre, acc)`` — test predictions and the accuracy from
        ``metric_oneclass.acc_kmeans``. (Previously returned ``None``,
        so the new return value is backward-compatible.)
    """
    clf = KNeighborsClassifier()
    # Exhaustively try k = 1..49; GridSearchCV refits the best estimator.
    param_dist = {'n_neighbors': range(1, 50)}
    grid = GridSearchCV(clf, param_dist, verbose=4)
    grid.fit(train_vector, train_y)
    best_estimator = grid.best_estimator_
    y_pre = best_estimator.predict(test_vector)
    metric = metric_oneclass(y_pre)
    print(grid.best_params_)
    # presumably acc_kmeans scores y_pre against labels held inside
    # metric_oneclass — TODO confirm against metric/metric_class.py
    acc = metric.acc_kmeans(y_pre)
    print(acc)
    return y_pre, acc




def get_vector_adfa(txt_list, type):
    """Vectorise ADFA system-call traces with the requested encoding.

    Relies on the module-level ``adfa_path`` set in ``__main__``.

    :param txt_list: list of raw trace texts
    :param type: one of ``'word2_vec_means'``, ``'wordbagvector'``,
        ``'doc2vec'`` (parameter name kept for callers even though it
        shadows the builtin)
    :return: array/matrix of per-trace feature vectors
    :raises ValueError: for an unrecognised ``type`` (the original fell
        through to an ``UnboundLocalError`` on the return statement)
    """
    # Construct the dataset helper once instead of once per branch.
    data_adfa = process_adfa(setpath=adfa_path)
    if type == 'word2_vec_means':
        word2vec_list = data_adfa.get_word2vec(txt_list, model_path=None, vector_size=100)
        # Average each trace's word vectors into one fixed-size vector.
        txt_vector = np.array([np.mean(sentence, axis=0) for sentence in word2vec_list])
    elif type == 'wordbagvector':
        txt_vector = data_adfa.get_wordsbag_Vector(txt_list, vector='count', ngram_range=(1, 1), min_df=1)
    elif type == 'doc2vec':
        txt_vector = data_adfa.get_doc2vec(txt_list, vector_size=10)
    else:
        raise ValueError('unknown vector type: %r' % (type,))
    return txt_vector




if __name__ == "__main__":

    # Raw string: the original non-raw path relied on '\d', '\A' not being
    # valid escapes (a DeprecationWarning in modern Python). Value unchanged.
    adfa_path = r'D:\database\ADFA-LD\ADFA-LD\ADFA-LD'
    data = process_adfa(setpath=adfa_path)
    traces_training, traces_validation, traces_attacks = data.get_txt()
    # Flatten the per-attack-type lists into a single attack trace list.
    traces_attacksall = []
    for types in traces_attacks:
        traces_attacksall = traces_attacksall + types
    print('训练集长度是' + str(len(traces_training)) + '-' * 20)
    print('验证集长度是' + str(len(traces_validation)) + '-' * 20)
    print('攻击集长度是' + str(len(traces_attacksall)) + '-' * 20)
    random.shuffle(traces_training)
    random.shuffle(traces_validation)
    random.shuffle(traces_attacksall)

    n_attack = len(traces_attacksall)  # 746 in the original ADFA-LD split
    # Train on all normal training traces plus every attack trace; test on
    # an equally-sized slice of the validation set plus the same attacks.
    train_x = traces_training + traces_attacksall
    test_normals = traces_validation[:n_attack]
    test_x = test_normals + traces_attacksall
    # Derive label counts from the data instead of hard-coding 833/746, so
    # a different dataset layout cannot silently misalign labels and rows.
    train_y = [0 for _ in range(len(traces_training))] + [1 for _ in range(n_attack)]
    test_y = [0 for _ in range(len(test_normals))] + [1 for _ in range(n_attack)]

    # NOTE(review): train and test are vectorised by two independent calls,
    # so the bag-of-words vocabularies may differ between them — confirm
    # that get_wordsbag_Vector shares/refits its vocabulary as intended.
    train_vector = get_vector_adfa(train_x, type='wordbagvector')
    test_vector = get_vector_adfa(test_x, type='wordbagvector')

    clf = KNeighborsClassifier(n_neighbors=2)
    clf.fit(train_vector, train_y)
    y_pre = clf.predict(test_vector)
    # Mean of element-wise label matches == accuracy.
    print(np.mean(y_pre == test_y))

    # Dump (real, predicted) pairs for offline analysis. open(..., newline='')
    # replaces codecs.open: the csv module requires newline='' to avoid
    # doubled line endings (blank rows) on Windows, and `with` guarantees
    # the handle is closed even on error.
    with open(r'C:/Users/Administrator/Desktop/我的投稿/实验数据/K-means.csv',
              'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['real', 'predict'])
        for real, pred in zip(test_y, y_pre):
            writer.writerow([str(real), str(pred)])


'''
    print('lidds数据集'+'-'*30)
    dir_path = r'E:\LID_DS\LID-DS-2019'
    data = data_process( pathdir=dir_path)
    normal_txts,abnormal_txts=data.get_txt_nothread()
    print('正常调用轨迹一共有'+str(len(normal_txts))+'-'*20)
    print('异常调用轨迹一共有' + str(len(abnormal_txts)) + '-' * 20)
    train_x=normal_txts[:500]+abnormal_txts[:500]
    train_y=[0 for i in range(500)]+[1 for j in range(500)]
    test_x=normal_txts[500:1000]+abnormal_txts[500:1000]
    x_vector=data.txt_vector(train_x, vector='tfidf', ngram_range=(1, 1), min_df=1)
    test_vector=data.txt_vector(test_x, vector='tfidf', ngram_range=(1, 1), min_df=1)
    # kmeans_model(x_vector,train_y,test_vector)
    clf = KNeighborsClassifier(n_neighbors=10)
    clf.fit(x_vector,train_y)
    y_pre=clf.predict(test_vector)
    metric = metric_oneclass(y_pre)
    acc = metric.acc_kmeans()
    print(acc)
'''







