from __future__ import division  # py3 "true division"
import csv
import re
from threading import local
import pandas as pd
import os
from pathlib import Path
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import numpy as np
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from gensim.models.word2vec import Word2Vec,LineSentence,PathLineSentences
from gensim.models.doc2vec import Doc2Vec,TaggedDocument
import smart_open
from keras.preprocessing import sequence
from gensim.models.ldamodel import LdaModel
from gensim import corpora

from gensim.models.coherencemodel import CoherenceModel
import pyLDAvis.gensim
import pyLDAvis




class process_adfa():
    """Feature extraction for the ADFA-LD host intrusion-detection dataset.

    ``setpath`` must point at the standard ADFA-LD layout:

    * ``Training_Data_Master``   -- normal training traces (one file each)
    * ``Validation_Data_Master`` -- normal validation traces
    * ``Attack_Data_Master``     -- one sub-folder per attack type

    Each trace file is a whitespace-separated sequence of tokens (system
    call numbers).  The methods expose bag-of-words / tf-idf, word2vec,
    doc2vec and LDA representations of those traces.
    """

    def __init__(self, setpath):
        # Dataset root plus the standard ADFA-LD sub-folders.
        self.setpath = setpath
        self.training_path = self.setpath + os.sep + 'Training_Data_Master'
        self.validation_path = self.setpath + os.sep + 'Validation_Data_Master'
        self.attack_path = self.setpath + os.sep + 'Attack_Data_Master'
        # NOTE(review): only referenced by commented-out PathLineSentences
        # code below -- confirm this folder is still needed.
        self.normal_path = self.setpath + os.sep + 'normal_traces'

    def get_txt(self):
        """Read every trace file from disk.

        Returns:
            tuple: ``(traces_training, traces_validation, traces_attacks)``
            where the first two are lists of raw trace strings and the
            third is a list of lists, one inner list per attack-type
            sub-folder of ``Attack_Data_Master``.
        """
        traces_training = []
        traces_validation = []
        # Fill each bucket from its corresponding folder.
        for bucket, folder in ((traces_training, self.training_path),
                               (traces_validation, self.validation_path)):
            for name in os.listdir(folder):
                with open(folder + os.sep + name, 'r') as fh:
                    bucket.append(fh.read())
        traces_attacks = []
        for attack_type in os.listdir(self.attack_path):
            folder_path = self.attack_path + os.sep + attack_type
            type_one = []
            for name in os.listdir(folder_path):
                with open(folder_path + os.sep + name, 'r') as fh:
                    type_one.append(fh.read())
            traces_attacks.append(type_one)
        return traces_training, traces_validation, traces_attacks

    def _all_traces(self):
        """Return training + validation + flattened attack traces as one list."""
        traces_training, traces_validation, traces_attacks = self.get_txt()
        flat_attacks = [trace for group in traces_attacks for trace in group]
        return traces_training + traces_validation + flat_attacks

    def _make_vectorizer(self, vector, ngram_range, min_df, vocabulary=None):
        """Build a Count/Tfidf vectorizer; raise ValueError on a bad kind.

        The token_pattern keeps single-character tokens, which the default
        sklearn pattern would drop.
        """
        if vector == 'count':
            return CountVectorizer(token_pattern=r"(?u)\b\w+\b",
                                   ngram_range=ngram_range, min_df=min_df,
                                   vocabulary=vocabulary)
        if vector == 'tfidf':
            return TfidfVectorizer(token_pattern=r"(?u)\b\w+\b",
                                   ngram_range=ngram_range, min_df=min_df,
                                   vocabulary=vocabulary)
        # Bug fix: the original fell through to an UnboundLocalError here.
        raise ValueError("vector must be 'count' or 'tfidf', got %r" % (vector,))

    def get_vocabulary(self, vector='tfidf', ngram_range=(1, 1), min_df=1):
        """Fit a vectorizer over the full corpus and return its vocabulary.

        Args:
            vector: ``'count'`` or ``'tfidf'``.
            ngram_range: n-gram range forwarded to the vectorizer.
            min_df: minimum document frequency forwarded to the vectorizer.

        Returns:
            dict: term -> column-index mapping learned from all traces.
        """
        vectorizer = self._make_vectorizer(vector, ngram_range, min_df)
        # fit() is enough; the transformed matrix was discarded anyway.
        vectorizer.fit(self._all_traces())
        return vectorizer.vocabulary_

    def get_idf(self, ngram_range=(1, 1), min_df=1):
        """Fit tf-idf over the full corpus.

        Returns:
            tuple: ``(idf, vocabulary)`` -- the learned inverse document
            frequencies and the term -> index mapping.
        """
        vectorizer = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b",
                                     ngram_range=ngram_range, min_df=min_df)
        vectorizer.fit(self._all_traces())
        return vectorizer.idf_, vectorizer.vocabulary_

    def get_wordsbag_Vector(self, txts, vector='tfidf', ngram_range=(1, 1), min_df=1):
        """Vectorize ``txts`` with a vocabulary learned from the full corpus.

        Args:
            txts: iterable of raw trace strings to transform.
            vector: ``'count'`` or ``'tfidf'``.

        Returns:
            numpy.ndarray: dense document-term matrix for ``txts``.
        """
        vocabulary = self.get_vocabulary(vector=vector, ngram_range=ngram_range,
                                         min_df=min_df)
        vectorizer = self._make_vectorizer(vector, ngram_range, min_df,
                                           vocabulary=vocabulary)
        return vectorizer.fit_transform(txts).toarray()

    def train_word2vec(self, sg=0, size=100, window=5, iter=5):
        """Train Word2Vec on all traces (normal + attack).

        Args:
            sg: 0 -> CBOW, 1 -> skip-gram.
            size: embedding dimensionality.
            window: context window size.
            iter: training epochs (name kept for backward compatibility
                even though it shadows the builtin).

        Returns:
            gensim.models.Word2Vec: the trained model.
        """
        tokenized = [trace.strip().split(" ") for trace in self._all_traces()]
        # min_count=1 keeps every token, however rare.
        model = Word2Vec(sentences=tokenized, sg=sg, vector_size=size,
                         window=window, epochs=iter, min_count=1)
        print('word2vec训练的单词有' + str(len(model.wv.index_to_key)) + '个')
        return model

    def get_word2vec(self, sentence_list, model_path=None, vector_size=50):
        """Embed each sentence as a ``(n_words, vector_size)`` float array.

        Args:
            sentence_list: iterable of raw trace strings.
            model_path: path to a saved Word2Vec model; when ``None`` a new
                model is trained from the dataset.
            vector_size: embedding dimensionality (must match the model).

        Returns:
            list[numpy.ndarray]: one array per sentence; rows for
            out-of-vocabulary words stay zero.
        """
        if model_path is None:
            model = self.train_word2vec(size=vector_size)
        else:
            model = Word2Vec.load(model_path)
        data = []
        for sentence in sentence_list:
            words = sentence.strip().split(" ")
            sentence_array = np.zeros((len(words), vector_size), dtype='float')
            for index, word in enumerate(words):
                try:
                    sentence_array[index, :] = model.wv[word]
                except KeyError:
                    # Out-of-vocabulary word: keep the zero row.
                    continue
            data.append(sentence_array)
        return data

    def parse_dataset(self, txt_list, model_path, maxlen, vector_size):
        """Map each trace to a padded sequence of 1-based vocabulary indices.

        Args:
            txt_list: iterable of raw trace strings.
            model_path: path to a saved Word2Vec model, or ``None`` to train.
            maxlen: sequences are padded/truncated to this length.
            vector_size: embedding size used when training a fresh model.

        Returns:
            numpy.ndarray: ``(len(txt_list), maxlen)`` index matrix; words
            missing from the vocabulary map to index 0.
        """
        if model_path is None:
            model = self.train_word2vec(size=vector_size)
        else:
            model = Word2Vec.load(model_path)
        key_to_index = model.wv.key_to_index
        data = []
        for sentence in txt_list:
            # get(word, -1) + 1 maps unknown words to 0 and shifts known
            # indices up by one, matching the original try/except logic.
            data.append([key_to_index.get(word, -1) + 1
                         for word in sentence.strip().split(' ')])
        return sequence.pad_sequences(data, maxlen=maxlen)

    def train_doc2vec(self, sg=0, size=100, window=5, iter=10):
        """Train Doc2Vec on the normal (training + validation) traces.

        NOTE(review): attack traces are read by ``get_txt`` but never
        tagged or trained on here -- confirm that is intentional.

        Args:
            sg: forwarded as gensim's ``dm`` flag.
            iter: training epochs (name kept for backward compatibility).

        Returns:
            gensim.models.Doc2Vec: the trained model.
        """
        traces_training, traces_validation, _ = self.get_txt()
        documents = [TaggedDocument(trace.strip().split(" "), tags=[index])
                     for index, trace in enumerate(traces_training + traces_validation)]
        return Doc2Vec(documents, vector_size=size, epochs=iter, window=window, dm=sg)

    def get_doc2vec(self, txt_list, vector_size, model_path=None):
        """Look up one vector per text; rows stay zero on lookup failure.

        NOTE(review): this indexes the *word* vectors with the whole text
        string (``model.wv[txt]``), which only succeeds when the full
        string happens to be a vocabulary token; ``infer_vector`` was
        probably intended.  Behavior kept as-is.

        Returns:
            numpy.ndarray: ``(len(txt_list), vector_size)`` float matrix.
        """
        if model_path is None:
            model = self.train_doc2vec(size=vector_size)
        else:
            model = Doc2Vec.load(model_path)
        array_data = np.zeros((len(txt_list), vector_size), dtype='float')
        for index, txt in enumerate(txt_list):
            try:
                array_data[index, :] = model.wv[txt]
            except KeyError:
                continue
        return array_data

    def train_LDA(self, nu_topics):
        """Train an LDA topic model over all traces.

        Args:
            nu_topics: number of topics.

        Returns:
            tuple: ``(ldamodel, DocumentTopicMatrix)`` where the matrix is
            ``(n_documents, nu_topics)`` with per-document topic weights.
        """
        tokenized = [trace.strip().split(" ") for trace in self._all_traces()]
        dictionary = corpora.Dictionary(tokenized)  # word -> id mapping
        # Bag-of-words: (word id, count) pairs per document.
        corpus = [dictionary.doc2bow(text) for text in tokenized]

        def coherence(num_topics):
            # Helper kept for topic-number sweeps; per the original
            # experiments c_v coherence peaked (~0.49) at 8 topics over 2-30.
            ldamodel = LdaModel(corpus, num_topics=num_topics,
                                id2word=dictionary, passes=30, random_state=1)
            ldacm = CoherenceModel(model=ldamodel, texts=tokenized,
                                   dictionary=dictionary, coherence='c_v')
            score = ldacm.get_coherence()  # compute once, not twice
            print(score)
            return score

        # minimum_probability=0 makes get_document_topics return every
        # topic, so positional assignment below covers all columns.
        ldamodel = LdaModel(corpus, num_topics=nu_topics, id2word=dictionary,
                            passes=30, random_state=1, minimum_probability=0)
        DocumentTopicMatrix = np.zeros([len(tokenized), nu_topics])
        for index, sentence in enumerate(corpus):
            doc_topic = ldamodel.get_document_topics(sentence, per_word_topics=False)
            # NOTE(review): columns are filled by list position rather than
            # topic id (topic[0]); with minimum_probability=0 these agree.
            for topic_index, topic in enumerate(doc_topic):
                DocumentTopicMatrix[index, topic_index] = topic[1]
        return ldamodel, DocumentTopicMatrix
        
















class MySentences(object):
  """Streaming iterable of tokenized lines from a list of files.

  Suitable for feeding gensim's Word2Vec without loading every file
  into memory at once.
  """
  def __init__(self, filenames):
    # Paths (local or remote -- anything smart_open understands).
    self.filenames = filenames
  def __iter__(self):
    for fname in self.filenames:
      # Bug fix: ``smart_open`` is a module, not a callable -- the original
      # ``smart_open(fname, ...)`` raised TypeError; the callable is
      # ``smart_open.open``.
      with smart_open.open(fname, 'r', encoding='utf-8') as fh:
        for line in fh:
          # Bug fix: the original comprehension iterated over
          # ``line and len(i) > 1`` (NameError: ``i`` unbound).
          # NOTE(review): reconstructed intent as "whitespace-split tokens
          # longer than one character" -- confirm the length filter.
          yield [token.strip() for token in line.split() if len(token) > 1]


















