from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer,LayerNormalization,Embedding
import tensorflow as tf
import numpy as np

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, GlobalAveragePooling1D,Activation,Input,Concatenate,Conv1D,GRU
from tensorflow.keras.initializers import GlorotUniform,Zeros
import sys
import os
sys.path.append(os.getcwd())
from data_process.data_process_adfa import process_adfa
from data_process.mybert_dataprocess import gpt_feature,token,to_categorical
from keras.models import Model
import matplotlib.pyplot as plt
from model.condcon1d import DyCon1D
from matplotlib.pyplot import MultipleLocator
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeClassifier
import codecs
import csv
import random
import pickle


# def my_init(shape,dtype=None):
#     initializer=GlorotUniform()((shape[0]-1,shape[1]),dtype)
#     index0=Zeros()((1,shape[1]),dtype)
#     embedding=K.concatenate((index0,initializer),axis=0)
#     return embedding

class Embedding(Layer):
    """Token-embedding lookup with Transformer-style scaling.

    NOTE(review): this class shadows tensorflow.keras.layers.Embedding
    imported at the top of the file; the rest of the file uses this one.
    """
    def __init__(self, model_dim, vocab_size, **kwargs):
        # model_dim: width of each embedding vector.
        # vocab_size: number of rows in the lookup table.
        self.model_dim = model_dim
        self.vocab_size = vocab_size
        super(Embedding, self).__init__(**kwargs)
    # Randomly (Glorot) initialize a (vocab_size, model_dim) lookup table.
    def build(self, input_shape):
        self.embeddings = self.add_weight(
            shape=(self.vocab_size, self.model_dim),
            initializer='glorot_uniform',
            name="embeddings")
        super(Embedding, self).build(input_shape)
    # Map integer tokens to embeddings and scale by model_dim ** scale.
    def call(self, token, scale=0.5):
        # Gather requires integer indices.
        if K.dtype(token) != "int32":
            token = K.cast(token, "int32")
        # Row lookup per token id.
        embedding = K.gather(self.embeddings, token)
        # scale=0.5 gives the sqrt(d_model) factor from "Attention Is All You Need".
        embedding = embedding * (self.model_dim ** scale)
        return embedding
    def get_config(self):
        config = super().get_config()
        config.update({
            'model_dim': self.model_dim,
            'vocab_size': self.vocab_size
        })
        return config
    def compute_output_shape(self, input_shape):
        # BUG FIX: was self._model_dim, which does not exist (the attribute
        # set in __init__ is self.model_dim) and raised AttributeError.
        return input_shape + (self.model_dim,)




# 接embedding层，位置编码
# Sinusoidal positional encoding, applied after the embedding layer.
class PositionalEncoding(Layer):
    """Fixed sin/cos positional encoding (Vaswani et al., 2017)."""
    def __init__(self, model_dim, **kwargs):
        # model_dim: must match the token-embedding width it is added to.
        self.model_dim = model_dim
        super(PositionalEncoding, self).__init__(**kwargs)
    def get_angles(self, pos, i, d_model):
        # Angle rate for position `pos` and dimension index `i`.
        return pos / (np.power(10000, (2 * (i // 2)) / np.float32(d_model)))
    def call(self, embedding):
        # embedding shape: (batch, sentence_length, ...); only the static
        # sequence length is read, so it must be known at graph-build time.
        sentence_length = embedding.shape[1]
        positional_encoding = np.zeros(shape=(sentence_length, self.model_dim))
        # Standard sin/cos position-encoding table.
        for pos in range(sentence_length):
            for i in range(self.model_dim):
                positional_encoding[pos, i] = self.get_angles(pos, i, self.model_dim)
        positional_encoding[:, 0::2] = np.sin(positional_encoding[:, 0::2])  # even indices 2i
        positional_encoding[:, 1::2] = np.cos(positional_encoding[:, 1::2])  # odd indices 2i+1
        return K.cast(positional_encoding, 'float32')
    def get_config(self):
        config = super().get_config()
        # BUG FIX: the key was misspelled 'mmodel_dim', so from_config would
        # pass an unexpected kwarg and fail to rebuild the layer.
        config.update({
            'model_dim': self.model_dim,
        })
        return config
    def compute_output_shape(self, input_shape):
        return input_shape




class Add(Layer):
    """Element-wise sum of two tensors (embedding + positional encoding)."""
    def __init__(self, **kwargs):
        super(Add, self).__init__(**kwargs)
    def call(self, inputs):
        # inputs is a pair; return their broadcasted sum.
        first, second = inputs
        return first + second
    def compute_output_shape(self, input_shape):
        # Output shape follows the first operand.
        return input_shape[0]


class ScaledDotProductAttention(Layer):
    """Scaled dot-product attention.

    mode='encoder' applies a padding mask; mode='decoder' applies the
    look-ahead (lower-triangular) sequence mask.
    """
    def __init__(self, mode, **kwargs):
        assert mode == "encoder" or mode == "decoder", "The parameter 'mode' can only receive two values, 'encoder' and 'decoder'."
        # Very large negative value: masked scores become ~0 after softmax.
        self.masking_num = -2**32
        self.mode = mode
        super(ScaledDotProductAttention, self).__init__(**kwargs)
    # Padding mask: positions where mask == 0 get a huge negative score so
    # softmax drives them to ~0.
    def padding_mask(self, QK, mask=None):
        if mask is None:
            # BUG FIX: previously returned an undefined local ('score') when
            # mask was None, raising UnboundLocalError. No mask => no change.
            return QK
        # NOTE(review): mask gains two axes here while QK is rank-3 after the
        # multi-head split — verify the broadcast shape against callers.
        mask = tf.cast(mask[:, tf.newaxis, tf.newaxis, :], dtype=tf.float32)  # tf.newaxis adds dimensions
        return QK + (1 - mask) * self.masking_num
    # Sequence mask (the classic lower-triangular look-ahead mask).
    def sequence_mask(self, QK):
        # 1 above the diagonal, 0 on/below it.
        seq_mask = 1 - tf.linalg.band_part(tf.ones_like(QK), -1, 0)
        seq_mask *= self.masking_num
        return QK + seq_mask
    # inputs: the Q, K, V tensors plus a mask tensor.
    def call(self, inputs):
        assert len(inputs) == 4, "inputs should be set [queries, keys, values,mask]."
        queries, keys, values, mask = inputs
        # Normalize dtypes to float32.
        if K.dtype(queries) != 'float32':  queries = K.cast(queries, 'float32')
        if K.dtype(keys) != 'float32':  keys = K.cast(keys, 'float32')
        if K.dtype(values) != 'float32':  values = K.cast(values, 'float32')
        # QK^T, scaled by sqrt(d_k).
        matmul = tf.matmul(queries, keys, transpose_b=True)
        dk = tf.cast(tf.shape(keys)[-1], tf.float32)
        matmul = matmul / tf.sqrt(dk)
        # Mask choice depends on whether this runs in the encoder or decoder.
        if self.mode == "encoder":
            matmul = self.padding_mask(matmul, mask)
        else:
            matmul = self.sequence_mask(matmul)
        softmax_out = K.softmax(matmul)  # softmax over the key axis
        return K.batch_dot(softmax_out, values)  # finally multiply by V
    def get_config(self):
        config = super().get_config()
        # BUG FIX: 'masking_num' was serialized too, but __init__ does not
        # accept it, so from_config raised TypeError. It is a constant anyway.
        config.update({
            "mode": self.mode
        })
        return config
    def compute_output_shape(self, input_shape):
        return input_shape


class MultiHeadAttention(Layer):
    """Multi-head attention: per-role linear projections, head split,
    scaled dot-product attention, then head re-concatenation."""
    def __init__(self, heads=8, model_dim=512, mode="encoder", trainable=True, **kwargs):
        self.heads = heads
        self.head_dim = model_dim // heads
        self.mode = mode
        self.trainable = trainable
        super(MultiHeadAttention, self).__init__(**kwargs)
    # Glorot-initialize one projection matrix per role (Q, K, V).
    def build(self, input_shape):
        def _projection(idx, weight_name):
            # Maps the idx-th input's last dimension to heads * head_dim.
            return self.add_weight(
                shape=(input_shape[idx][-1], self.heads * self.head_dim),
                initializer='glorot_uniform',
                trainable=self.trainable,
                name=weight_name)
        self.weights_queries = _projection(0, 'weights_queries')
        self.weights_keys = _projection(1, 'weights_keys')
        self.weights_values = _projection(2, 'weights_values')
        self.shape = input_shape
        super(MultiHeadAttention, self).build(input_shape)
    def call(self, inputs):
        assert len(inputs) == 4, "inputs should be set [queries, keys, values,mask]."
        # The first three inputs are upstream activations, not yet Q/K/V;
        # multiplying by the projection weights produces the real Q/K/V.
        q_in, k_in, v_in, mask_inputs = inputs
        projected_q = K.dot(q_in, self.weights_queries)
        projected_k = K.dot(k_in, self.weights_keys)
        projected_v = K.dot(v_in, self.weights_values)
        # Head split: chunk the feature axis into `heads` pieces and stack
        # them along the batch axis so attention runs per head.
        per_head = [
            tf.concat(tf.split(tensor, self.heads, axis=2), axis=0)
            for tensor in (projected_q, projected_k, projected_v)
        ]
        attention_layer = ScaledDotProductAttention(mode=self.mode)
        head_outputs = attention_layer(per_head + [mask_inputs])
        # Undo the head stacking: back to (batch, seq, heads * head_dim).
        return tf.concat(tf.split(head_outputs, self.heads, axis=0), axis=2)
    def get_config(self):
        config = super().get_config()
        config.update({
            'head_dim': self.head_dim,
            'heads': self.heads,
            "mode": self.mode,
            "trainable": self.trainable
        })
        return config
    def compute_output_shape(self, input_shape):
        return input_shape

# Position-wise feed-forward block shared by encoder and decoder:
# Dense(relu) expansion followed by a Dense projection back to model_dim.
def FeedForwardNetwork(units_dim, model_dim):
    hidden = Dense(units_dim, activation='relu')
    projection = Dense(model_dim)
    return Sequential([hidden, projection])


class DecoderLayer(Layer):
    """One GPT-style decoder block.

    Masked multi-head self-attention + residual + LayerNorm, dropout, then a
    position-wise feed-forward network + residual + LayerNorm.
    """
    def __init__(self, heads=5, model_dim=50, units_dim=200, epsilon=0.001, drop_rate=0.1, **kwargs):
        # FIX: call super().__init__ before assigning sub-layers so Keras
        # attribute tracking is initialized when they are registered.
        super(DecoderLayer, self).__init__(**kwargs)
        self.heads = heads
        self.model_dim = model_dim
        self.units_dim = units_dim
        self.epsilon = epsilon
        self.drop_rate = drop_rate
        # Decoder-mode attention applies the look-ahead (sequence) mask.
        self.multi_head_attention1 = MultiHeadAttention(self.heads, model_dim=self.model_dim, mode="decoder")
        # (attribute name 'ff_netword' kept for weight-name compatibility)
        self.ff_netword = FeedForwardNetwork(units_dim, model_dim)
        self.layer_norm1 = LayerNormalization(epsilon=epsilon)
        self.layer_norm2 = LayerNormalization(epsilon=epsilon)
        self.dropout = Dropout(drop_rate)
    def call(self, inputs, training=True):
        encodings, input_mask = inputs
        # Self-attention: Q = K = V = encodings.
        attn_output1 = self.multi_head_attention1([encodings, encodings, encodings, input_mask])
        out1 = self.layer_norm1(encodings + attn_output1)
        # FIX: propagate the training flag so dropout is disabled at inference
        # (previously the flag was accepted but never used).
        out1 = self.dropout(out1, training=training)
        ffn_output = self.ff_netword(out1)
        out2 = self.layer_norm2(out1 + ffn_output)
        return out2
    def get_config(self):
        config = super().get_config().copy()
        # FIX: serialize every constructor argument (units_dim/epsilon/
        # drop_rate were previously lost on a get_config/from_config round trip).
        config.update({
            'model_dim': self.model_dim,
            'heads': self.heads,
            'units_dim': self.units_dim,
            'epsilon': self.epsilon,
            'drop_rate': self.drop_rate,
        })
        return config
    def compute_output_shape(self, input_shape):
        return input_shape



class OutputLayer(Layer):
    """Project decoder states back to vocabulary logits via the (tied)
    embedding matrix, add a learned bias, and apply softmax."""
    def __init__(self, vocab_size, **kwargs):
        # FIX: call super().__init__ before creating tracked state so the
        # bias variable is registered with the layer.
        super(OutputLayer, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        b_init = tf.zeros_initializer()(shape=(vocab_size,), dtype="float32")
        self.bias = tf.Variable(initial_value=b_init, trainable=True, name="output_bias")
        self.activation = Activation(activation="softmax")
    def call(self, inputs):
        embeding_weights, decoder_out, input_mask = inputs
        if input_mask is not None:
            # Push padded positions toward zero probability pre-softmax.
            mask = tf.cast(input_mask[:, :, tf.newaxis], dtype=tf.float32)  # tf.newaxis adds a dimension
            decoder_out = decoder_out + (1.0 - mask) * -1e9
        # Weight tying: logits are decoder_out · embeddings^T.
        output = tf.matmul(decoder_out, embeding_weights, transpose_b=True) + self.bias
        output = self.activation(output)
        return output
    def get_config(self):
        # Added so the layer round-trips through get_config/from_config.
        config = super().get_config()
        config.update({'vocab_size': self.vocab_size})
        return config

class Pad_conv(Layer):
    """Left-padded 1-D dynamic convolution.

    Pads the sequence on the left by (kernel_size - 1) steps before a
    'valid' convolution so output length equals input length (causal style).
    """
    def __init__(self, filters, kernel_size, num_experts, **kwargs):
        # FIX: call super().__init__ before assigning sub-layers so Keras
        # tracks the wrapped convolution correctly.
        super(Pad_conv, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_size = kernel_size
        self.num_experts = num_experts
        self.conv1d = DyCon1D(self.filters, self.kernel_size, self.num_experts, padding='valid')
    def call(self, inputs):
        input_shape = tf.shape(inputs)
        # NOTE(review): the pad is ones, not zeros — presumably deliberate;
        # confirm against DyCon1D's expectations.
        pad = tf.ones((input_shape[0], self.kernel_size - 1, input_shape[2]))
        x = tf.concat((pad, inputs), axis=1)
        return self.conv1d(x)
    def get_config(self):
        # Added so the layer round-trips through get_config/from_config.
        config = super().get_config()
        config.update({
            'filters': self.filters,
            'kernel_size': self.kernel_size,
            'num_experts': self.num_experts,
        })
        return config

class attention(Layer):
    """Single-head scaled dot-product attention with learned Q/K/V projections."""
    def __init__(self, qk_units, v_units, **kwargs):
        # FIX: call super().__init__ first, then record configuration.
        super(attention, self).__init__(**kwargs)
        self.qk_units = qk_units
        self.v_units = v_units
        self.kernel_initializer = 'glorot_uniform'
    def build(self, input_shape):
        # Q and K share a width (qk_units) so their dot product is defined.
        self.q_dense = Dense(units=self.qk_units, kernel_initializer=self.kernel_initializer)
        self.k_dense = Dense(units=self.qk_units, kernel_initializer=self.kernel_initializer)
        self.v_dense = Dense(units=self.v_units, kernel_initializer=self.kernel_initializer)
        super(attention, self).build(input_shape)
    def call(self, inputs):
        q, k, v = inputs[:3]
        QQ = self.q_dense(q)
        KK = self.k_dense(k)
        VV = self.v_dense(v)
        # QK^T scaled by sqrt(d_k).
        matmul = tf.matmul(QQ, KK, transpose_b=True)
        dk = tf.cast(tf.shape(KK)[-1], tf.float32)
        matmul = matmul / tf.sqrt(dk)
        softmax_out = K.softmax(matmul)  # softmax over the key axis
        return K.batch_dot(softmax_out, VV)  # finally multiply by V
    def get_config(self):
        # Added so the layer round-trips through get_config/from_config.
        config = super().get_config()
        config.update({
            'qk_units': self.qk_units,
            'v_units': self.v_units,
        })
        return config



        

def load_traindata(adfa_path, maxlen, vocab_size):
    """Load ADFA-LD traces and convert them into model training arrays.

    Args:
        adfa_path: root directory of the ADFA-LD dataset.
        maxlen: fixed sequence length traces are split/padded to.
        vocab_size: syscall vocabulary size (for one-hot label encoding).

    Returns:
        (data_features, data_labels): data_features is [input_ids, input_mask]
        (np.ndarray each); data_labels is [label_ids] one-hot encoded.
    """
    adfa_processor = process_adfa(adfa_path)
    traces_training, traces_validation, traces_attacks = adfa_processor.get_txt()
    tokenizater = token()
    # NOTE(review): training data is taken from the *validation* split here —
    # confirm this is intentional.
    all_tokens_meta = tokenizater.docs_split_tokens(traces_validation)
    all_tokens = tokenizater.doc_to_maxlen(all_tokens_meta, maxlen=maxlen)
    data_features = [tokenizater.gpt_feature(tokens, maxlen=maxlen) for tokens in all_tokens]
    input_ids, input_mask, label_ids = [], [], []
    for feature in data_features:
        input_ids.append(feature.input_ids)
        input_mask.append(feature.input_mask)
        label_ids.append(feature.label_ids)
    label_ids = to_categorical(label_ids, num_classes=vocab_size)
    # (Removed a stray debug `print()` that emitted a blank line per call.)
    input_ids = np.array(input_ids)
    input_mask = np.array(input_mask)
    data_features = [input_ids, input_mask]
    data_labels = [label_ids]
    return data_features, data_labels
def load_testdata(adfa_path, maxlen, vocab_size):
    """Yield per-trace test batches over 746 normal + all attack traces.

    For each trace, yields a 5-tuple:
        data_features: [input_ids, input_mask] arrays for the trace segments
        data_labels:   [one-hot label_ids]
        label_ids_trace: raw (un-encoded) label ids per segment
        sort_index:    syscall indices sorted by descending bag-of-words count
        trace:         the raw trace text
    """
    adfa_processor = process_adfa(adfa_path)
    traces_training, traces_validation, traces_attacks = adfa_processor.get_txt()
    # Flatten the per-attack-type lists into one list.
    # (FIX: loop variable renamed — it previously shadowed the builtin `type`.)
    traces_attacking = []
    for attack_type in traces_attacks:
        traces_attacking += attack_type

    tokenizater = token()
    test_data = traces_training[:746] + traces_attacking
    data_arrary = adfa_processor.get_wordsbag_Vector(test_data)
    sort_index = np.argsort(-data_arrary)  # most frequent syscalls first
    all_test_tokens = tokenizater.docs_split_tokens(test_data)
    for index, trace in enumerate(all_test_tokens):
        test_trace = tokenizater.doc_to_maxlen(trace, maxlen, train=False)
        data_features = [tokenizater.gpt_feature(tokens, maxlen) for tokens in test_trace]
        input_ids, input_mask, label_ids = [], [], []
        for feature in data_features:
            input_ids.append(feature.input_ids)
            input_mask.append(feature.input_mask)
            label_ids.append(feature.label_ids)
        # Keep the raw integer labels before one-hot encoding.
        label_ids_trace = label_ids.copy()
        label_ids = to_categorical(label_ids, num_classes=vocab_size)
        input_ids = np.array(input_ids)
        input_mask = np.array(input_mask)
        data_features = [input_ids, input_mask]
        data_labels = [label_ids]
        yield data_features, data_labels, label_ids_trace, sort_index[index, :], test_data[index]


def anonymous_detection(yuzhis, scores, y_label):
    """Threshold-sweep anomaly detection: predict 1 (attack) when score <= threshold.

    Args:
        yuzhis: iterable of candidate thresholds.
        scores: per-trace scores (lower = more anomalous).
        y_label: ground-truth 0/1 labels, same length as scores.

    Returns:
        The accuracy for the LAST threshold tried (kept for backward
        compatibility), or None if `yuzhis` is empty (previously this
        raised UnboundLocalError).
    """
    scores = np.asarray(scores)
    y_label = np.asarray(y_label)
    acc = None
    for yuzhi in yuzhis:
        # Vectorized replacement of the original per-element loop.
        y_predict = (scores <= yuzhi).astype(int)
        acc = np.mean(y_predict == y_label)
        print("当阈值为"+str(yuzhi)+",准确率为"+str(acc))
    return acc


def get_vector_adfa(txt_list, type, adfa_path):
    """Vectorize ADFA traces with one of three strategies.

    Args:
        txt_list: list of raw trace strings.
        type: 'word2_vec_means' | 'wordbagvector' | 'doc2vec'.
              (The name shadows the builtin but is kept because callers
              pass it as a keyword argument.)
        adfa_path: dataset root used to construct the processor.

    Returns:
        np.ndarray of per-trace feature vectors.

    Raises:
        ValueError: for an unknown `type` (previously this fell through to
        an UnboundLocalError on `txt_vector`).
    """
    data_adfa = process_adfa(setpath=adfa_path)
    if type == 'word2_vec_means':
        # Mean of the word2vec vectors of the syscalls in each trace.
        word2vec_list = data_adfa.get_word2vec(txt_list, model_path=None, vector_size=100)
        txt_vector = np.array([np.mean(sentence, axis=0) for sentence in word2vec_list])
    elif type == 'wordbagvector':
        txt_vector = data_adfa.get_wordsbag_Vector(txt_list, vector='count', ngram_range=(1, 1), min_df=1)
    elif type == 'doc2vec':
        txt_vector = data_adfa.get_doc2vec(txt_list, vector_size=10)
    else:
        raise ValueError("unknown vectorization type: " + repr(type))
    return txt_vector






def gpt_model(mode='train'):
    tokenizater=token()
    vocab_size = len(tokenizater.vocab)
    hidden_size = 400
    maxlen = 200
    num_layers = 6
    batch_size = 16
    epochs = 5
    model_dim=250
    heads=5
    epsilon=0.01
    gpt_model_path="G:\\dataset\\ADFA-LD\\ADFA-LD\\mygpt_weights.h5"
    
    input_ids = Input(shape=(maxlen,), name="input_ids", dtype=tf.float32)
    input_mask = Input(shape=(maxlen,), name="input_mask", dtype=tf.float32)
  
    embed=Embedding(model_dim, vocab_size)
    input_embed=embed(input_ids)
    #gpt
    posi=PositionalEncoding(model_dim)(input_ids)
    add=Add()([input_embed,posi])
    att=DecoderLayer(heads,model_dim,hidden_size)([add,input_mask])
    LN=LayerNormalization(epsilon=epsilon)(att+add)
    for i in range(num_layers-1):
        att=DecoderLayer(heads,model_dim,hidden_size)([LN,input_mask])
        LN=LayerNormalization(epsilon=epsilon)(att+add)
    embed_weights=embed.weights
    output_gpt=OutputLayer(vocab_size)([embed_weights,att,input_mask])
    gpt_model=Model(inputs=[input_ids,input_mask],outputs=output_gpt)
    adfa_path='D:\database\ADFA-LD\ADFA-LD\ADFA-LD'
    if mode=='None':
        return gpt_model,gpt_model_path
    if mode=='train':
        # if os.path.exists(gpt_model_path):
        #     print('------load the model------')
        #     gpt_model.load_weights(gpt_model_path)
        data_features, data_labels=load_traindata(adfa_path,maxlen,vocab_size)
        
        gpt_model.compile(optimizer='adam', loss='categorical_crossentropy', )
        gpt_model.fit(x=data_features, y=data_labels, 
                batch_size=batch_size, epochs=epochs)#,initial_epoch=100
        
        gpt_model.save_weights(gpt_model_path)
   
    if mode=='test':
        gpt_model.load_weights(gpt_model_path)
        score=[]
        predict_score=[]
        score_segs_traces=[]
        for data_features,data_labels,label_ids_trace,sort_index,trace in load_testdata(adfa_path,maxlen,vocab_size):
            trace_predict=gpt_model.predict(data_features)
            shape=trace_predict.shape
            score_segs=[]
            for i in range(shape[0]):
                score_seg=[]
                for j in range(shape[1]):
                    if label_ids_trace[i][j] in sort_index[:30]:#30时最高，为0.83
                        score_seg.append(trace_predict[i][j][label_ids_trace[i][j]])
                    else :
                        continue
                score_segs.append(np.mean(score_seg))
            score_segs_traces.append(score_segs)
        #同步预警功能测试
        seg_indexs=[0 for i in range(1492)]
        result=[0 for i in range(1492)]
        seg_lenth=[]
        for traces,trace_score in enumerate(score_segs_traces):
            seg_lenth.append(len(trace_score))
            for segs,seg_score in enumerate(trace_score):
                if seg_score <=0.55:
                    result[traces]=1
                    seg_indexs[traces]=segs+1
                    break
                else :
                    continue
        print(np.mean(seg_lenth))
        print(max(seg_lenth))
        # test_y=[0 for i in range(746)]+[1 for i in range(746)]
        # print(np.mean(np.array(test_y)==result))
        # f = codecs.open(r'C:/Users/Administrator/Desktop/我的投稿/实验数据/sys_gpt_warning.csv', 'w', encoding='utf-8')
        # writer = csv.writer(f)
        # writer.writerow(['real', 'predict', 'segs'])
        # for i in range(len(test_y)):
        #     writer.writerow([str(test_y[i]), str(result[i]),str(seg_indexs[i])])
        # f.close()


        
        # plt.figure(figsize=(4, 4))
        # good_score=score_segs_traces[:746]
        # bad_score=score_segs_traces[746:]
        # for segs_score1 in good_score:
        #     segs=len(segs_score1)+1
        #     plt.plot( range(1,segs),segs_score1, color='black')
        # for segs_score2 in bad_score:
        #     segs=len(segs_score2)+1
        #     plt.plot( range(1,segs),segs_score2, color='red')
        # plt.axhline(0.5, color='black', lw=2)
        # plt.rcParams['font.sans-serif']=['SimHei']
        # plt.ylabel("片段内正确预测概率均值")
        # plt.title("sys-gpt同步预警时片段内正确预测概率走势图")
        # plt.show()
            

            
         
        

        '''
        print('*'*20+'点图')
        print(len(score))
        plt.figure(figsize=(5, 4))
        plt.scatter(range(746),score[:746],c='green')
        plt.scatter(range(746),score[746:],c='red')
        my_x_ticks = np.arange(0, 800, 400)
        my_y_ticks = np.arange(0, 1, 0.25)
        plt.xticks(my_x_ticks,alpha=0)
        plt.yticks(my_y_ticks)
        plt.rcParams['font.sans-serif']=['SimHei']
        plt.tick_params(axis='x', width=0)

        plt.ylabel("预测概率均值")
        plt.title("sys-gpt")
        
        plt.axhline(0.55, color='black', lw=2)
        plt.show()
        label_y=[0 for i in range(746)]+[1 for j in range(746)]
        label_y=np.array(label_y)
        acc=anonymous_detection([0.35,0.4,0.425,0.45,0.5,0.525,0.55,0.6],good_score+bad_score,label_y)
        print(acc)
        '''
        '''
        test_y=[0 for i in range(746)]+[1 for i in range(746)]
        f = codecs.open(r'C:/Users/Administrator/Desktop/我的投稿/实验数据/sys_gpt.csv', 'w', encoding='utf-8')
        writer = csv.writer(f)
        writer.writerow(['real', 'predict'])
        prob=[]
        num=0
        for j in range(len(test_y)):
            if j <746:
                prob_trace=(1-score[j]-0.05)/0.45*0.5
                prob.append(abs(prob_trace))
            else :
                prob_trace=(1-score[j]+0.05)/0.45*0.5
                prob.append(abs(prob_trace))
            if abs(prob_trace-test_y[j])<=0.5:
                num+=1
        print(num/len(prob))

        for i in range(len(test_y)):
            writer.writerow([str(test_y[i]), str(prob[i])])
        f.close()
        '''

def GRU_model(mode='train'):
    tokenizater=token()
    vocab_size = len(tokenizater.vocab)
    hidden_size = 400
    maxlen = 200
    batch_size = 256
    epochs = 10
    model_dim=200
    units=200
    dropout=0.1
    heads=5
    epsilon=0.01
    
    con1d_model_path="G:\\dataset\\ADFA-LD\\ADFA-LD\\mygru_weights_200.h5"
    input_ids = Input(shape=(maxlen,), name="input_ids", dtype=tf.float32)
    input_mask = Input(shape=(maxlen,), name="input_mask", dtype=tf.float32)
    
    embed=Embedding(model_dim, vocab_size)
    input_embed=embed(input_ids)
    #GRU
    embed_weights=embed.weights
    att1=attention(model_dim,model_dim)([input_embed,input_embed,input_embed])
    gru1=GRU(units=units,return_sequences=True)(att1)
    
    
    forward1=FeedForwardNetwork(hidden_size,model_dim)(gru1)
    gru2=GRU(units=units,return_sequences=True)(forward1)
    forward2=FeedForwardNetwork(hidden_size,model_dim)(gru2)
    # dr1=Dropout(dropout)(forward1)

    # l1=LayerNormalization(epsilon)(dr1+input_embed)
    # att2=attention(units,units)([forward1,forward1,forward1])
    # gru2=GRU(units=units,return_sequences=True)(att2)
    # forward2=FeedForwardNetwork(hidden_size,model_dim)(gru2)
    output=OutputLayer(vocab_size)([embed_weights,forward2,input_mask])
    con1d_model=Model(inputs=[input_ids,input_mask],outputs=output)
    adfa_path='D:\database\ADFA-LD\ADFA-LD\ADFA-LD'
    if mode=='None':
        return con1d_model,con1d_model_path
    if mode=='train':
        con1d_model.summary()
        data_features, data_labels=load_traindata(adfa_path,maxlen,vocab_size)
        con1d_model.compile(optimizer='adam', loss='categorical_crossentropy', )
        con1d_model.fit(x=data_features, y=data_labels, 
                batch_size=batch_size, epochs=epochs)
        con1d_model.save_weights(con1d_model_path)
    if mode=='test':
        con1d_model.load_weights(con1d_model_path)
        score=[]
        predict_score=[]
        score_segs_traces=[]
        for data_features,data_labels,label_ids_trace,sort_index,trace in load_testdata(adfa_path,maxlen,vocab_size):
            trace_predict=con1d_model.predict(data_features)
            shape=trace_predict.shape
            score_segs=[]
            for i in range(shape[0]):
                score_seg=[]
                for j in range(shape[1]):
                    if label_ids_trace[i][j] in sort_index[:30]:#30时最高，为0.83
                        score_seg.append(trace_predict[i][j][label_ids_trace[i][j]])
                    else :
                        continue
                score_segs.append(np.mean(score_seg))
            score_segs_traces.append(score_segs)
            score.append(np.mean(score_segs))
        #同步预警
        seg_indexs=[0 for i in range(1492)]
        result=[0 for i in range(1492)]
        for traces,trace_score in enumerate(score_segs_traces):
            for segs,seg_score in enumerate(trace_score):
                if seg_score <=0.55:
                    result[traces]=1
                    seg_indexs[traces]=segs+1
                    break
                else :
                    continue
        test_y=[0 for i in range(746)]+[1 for i in range(746)]
        print(np.mean(np.array(test_y)==result))
        f = codecs.open(r'C:/Users/Administrator/Desktop/我的投稿/实验数据/sys_gru_warning.csv', 'w', encoding='utf-8')
        writer = csv.writer(f)
        writer.writerow(['real', 'predict', 'segs'])
        for i in range(len(test_y)):
            writer.writerow([str(test_y[i]), str(result[i]),str(seg_indexs[i])])
        f.close()
        
        plt.figure(figsize=(4, 4))
        good_score=score_segs_traces[:746]
        bad_score=score_segs_traces[746:]
        for segs_score1 in good_score:
            segs=len(segs_score1)+1
            plt.plot( range(1,segs),segs_score1, color='black')
        for segs_score2 in bad_score:
            segs=len(segs_score2)+1
            plt.plot( range(1,segs),segs_score2, color='red')
        plt.axhline(0.5, color='black', lw=2)
        plt.rcParams['font.sans-serif']=['SimHei']
        plt.ylabel("片段内正确预测概率均值")
        plt.title("sys_gru同步预警时片段内正确预测概率走势图")
        plt.show()
        # print('预测准确率为',np.mean(predict_score))
        '''
        good_score=score[:746]
        bad_score=score[746:]
        

        print('*'*20+'点图')
        print(len(score))
        plt.figure(figsize=(5, 4))
        plt.scatter(range(746),good_score,c='green')
        plt.scatter(range(746),bad_score,c='red')
        my_x_ticks = np.arange(0, 800, 400)
        my_y_ticks = np.arange(0, 1, 0.25)
        plt.xticks(my_x_ticks,alpha=0)
        plt.yticks(my_y_ticks)
        plt.rcParams['font.sans-serif']=['SimHei']
        plt.tick_params(axis='x', width=0)

        plt.ylabel("预测概率均值")
        plt.title("sys-gru")
        
        plt.axhline(0.5, color='black', lw=2)
        plt.show()
        label_y=[0 for i in range(746)]+[1 for j in range(746)]
        label_y=np.array(label_y)
        acc=anonymous_detection([0.35,0.4,0.425,0.45,0.5,0.55,0.6,0.7],good_score+bad_score,label_y)
        print(acc)
        


        test_y=[0 for i in range(746)]+[1 for i in range(746)]
        f = codecs.open(r'C:/Users/Administrator/Desktop/我的投稿/实验数据/sys_gru.csv', 'w', encoding='utf-8')
        writer = csv.writer(f)
        writer.writerow(['real', 'predict'])
        prob=[]
        num=0
        for j in range(len(test_y)):
            if j <746:
                prob_trace=(1-score[j]-0.1)/0.45*0.5
                prob.append(abs(prob_trace))
            else :
                prob_trace=(1-score[j]+0.1)/0.45*0.5
                prob.append(abs(prob_trace))
            if abs(prob_trace-test_y[j])<=0.5:
                num+=1
        print(num/len(prob))

        for i in range(len(test_y)):
            writer.writerow([str(test_y[i]), str(prob[i])])
        f.close()
        '''

def randomforest(mode='train'):
    """Train/evaluate a bag-of-words RandomForest baseline on ADFA-LD.

    mode:
        'train' — fit on (training + attack) traces and pickle the model.
        'test'  — load the pickle and report accuracy on validation + attacks.
        'None'  — just load and return the pickled model.
    """
    # FIX: raw string — the original literal relied on '\d' and '\A' being
    # invalid escapes that Python keeps verbatim (and warns about); r'' makes
    # the identical value explicit.
    adfa_path = r'D:\database\ADFA-LD\ADFA-LD\ADFA-LD'
    data = process_adfa(setpath=adfa_path)
    traces_training, traces_validation, traces_attacks = data.get_txt()
    traces_attacksall = []
    for types in traces_attacks:
        traces_attacksall.extend(types)
    random.shuffle(traces_training)
    random.shuffle(traces_validation)
    random.shuffle(traces_attacksall)
    train_x = traces_training + traces_attacksall
    # FIX: derive label counts from the data instead of hard-coding 833/746.
    train_y = [0] * len(traces_training) + [1] * len(traces_attacksall)
    train_vector = get_vector_adfa(train_x, type='wordbagvector', adfa_path=adfa_path)

    # A regressor is used deliberately: its continuous output feeds the
    # ensemble score in test().
    clf = RandomForestRegressor(n_estimators=10)
    save_path = 'G:/dataset/ADFA-LD/ADFA-LD/randomForest.pickle'
    if mode == 'train':
        clf.fit(train_vector, train_y)
        with open(save_path, 'wb') as f:
            pickle.dump(clf, f)
    if mode == 'test':
        test_x = traces_validation[:746] + traces_attacksall
        test_y = [0] * 746 + [1] * len(traces_attacksall)
        test_vector = get_vector_adfa(test_x, type='wordbagvector', adfa_path=adfa_path)
        with open(save_path, 'rb') as f:
            clf2 = pickle.load(f)
        y_pre = clf2.predict(test_vector)
        # A regression output within 0.5 of the 0/1 label counts as correct.
        num = sum(1 for p, t in zip(y_pre, test_y) if abs(p - t) <= 0.5)
        print(num / len(y_pre))
    if mode == 'None':
        with open(save_path, 'rb') as f:
            clf2 = pickle.load(f)
        return clf2




        





def test():
    """Ensemble evaluation: combine sys-gru, sys-gpt and random-forest scores.

    Per trace, final score = 0.25 * (1 - gru_score)
                           + 0.25 * scaled (1 - gpt_score)
                           + 0.50 * random-forest prediction,
    compared against the 0/1 ground truth with a 0.5 tolerance, then written
    to a CSV.
    """
    tokenizater = token()
    vocab_size = len(tokenizater.vocab)
    maxlen = 200
    thegpt_model, gpt_path = gpt_model(mode='None')
    theconv1d_model, con1dpath = GRU_model(mode='None')
    rf1 = randomforest(mode='None')
    print('*'*10+"随机森林模型已加载"+'*'*10)
    thegpt_model.load_weights(gpt_path)
    print('*'*10+"sys-gpt模型已加载"+'*'*20)
    theconv1d_model.load_weights(con1dpath)
    print('*'*10+"sys-gru模型已加载"+'*'*20)
    # Raw string: the original relied on invalid escapes being kept verbatim.
    adfa_path = r'D:\database\ADFA-LD\ADFA-LD\ADFA-LD'
    score1 = []
    score2 = []
    traces = []
    test_y = [0 for i in range(746)] + [1 for i in range(746)]
    for data_features, data_labels, label_ids_trace, sort_index, trace in load_testdata(adfa_path, maxlen, vocab_size):
        trace_predict1 = theconv1d_model.predict(data_features)
        trace_predict2 = thegpt_model.predict(data_features)
        # Flatten (segments, seq, vocab) -> (segments*seq, vocab).
        trace_predict1 = np.reshape(trace_predict1, (-1, trace_predict1.shape[-1]))
        trace_predict2 = np.reshape(trace_predict2, (-1, trace_predict2.shape[-1]))
        label_ids_trace = np.array(label_ids_trace).ravel()
        score1_trace = []
        score2_trace = []
        # Only score positions whose true syscall is among the 30 most
        # frequent in the trace (30 gave the best accuracy, 0.83).
        top_syscalls = sort_index[:30]
        for i in range(trace_predict1.shape[0]):
            if label_ids_trace[i] in top_syscalls:
                score1_trace.append(trace_predict1[i][label_ids_trace[i]])
        for j in range(trace_predict2.shape[0]):
            if label_ids_trace[j] in top_syscalls:
                # BUG FIX: this loop previously read trace_predict1, so the
                # GPT model's predictions never contributed to score2.
                score2_trace.append(trace_predict2[j][label_ids_trace[j]])
        score1.append(np.mean(score1_trace))
        score2.append(np.mean(score2_trace))
        traces.append(trace)
    rf_test_vector = get_vector_adfa(traces, type='wordbagvector', adfa_path=adfa_path)
    y_pre = rf1.predict(rf_test_vector)
    score = []
    num = 0
    for i in range(len(score1)):
        # Weighted ensemble of the three detectors.
        score_trace = (1 - score1[i]) * 0.25
        score_trace += (1 - score2[i]) / (1 - 0.55) * 0.5 * 0.25
        score_trace += y_pre[i] * 0.5
        score.append(score_trace)
        if abs(score_trace - test_y[i]) <= 0.5:
            num += 1
    print(num / len(score1))
    # Use a context manager so the file is closed even on error.
    with codecs.open(r'C:/Users/Administrator/Desktop/我的投稿/实验数据/other.csv', 'w', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['real', 'predict'])
        for i in range(len(test_y)):
            writer.writerow([str(test_y[i]), str(score[i])])




        






        





    
# Script entry point: uncomment the stage you want to run (train/test of an
# individual model) or keep test() for the combined ensemble evaluation.
if __name__ == '__main__':
    # gpt_model(mode='train')
    # gpt_model(mode='test')
    # GRU_model(mode='train')
    # GRU_model(mode='test')
    # randomforest(mode='train')
    # randomforest(mode='test')
    test()
