import gensim
from gensim.models import Word2Vec
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models, callbacks

import readtrainset as rts

# Print the available GPU devices
def print_available_gpus():
    """Report the GPU devices visible to TensorFlow, if any."""
    devices = tf.config.list_physical_devices('GPU')
    if not devices:
        print("No GPU devices found.")
        return
    print(f"Available GPUs: {[gpu.name for gpu in devices]}")

# Model training
# Convert a code snippet into a matrix
def vectorize_code_snippet(code_snippet, max_len=None, vec_size=None, model=None):
    """Convert a token sequence into a fixed-size embedding matrix.

    Tokens beyond ``max_len`` are ignored; rows for out-of-vocabulary
    tokens (and unused trailing rows) remain zero.

    Args:
        code_snippet: sequence of string tokens.
        max_len: number of rows; defaults to the module-level ``max_length``.
        vec_size: embedding dimension; defaults to the module-level
            ``vector_size``.
        model: object exposing a ``wv`` mapping of token -> vector
            (e.g. a trained Word2Vec model); defaults to the module-level
            ``word2vec_model``.

    Returns:
        np.ndarray of shape (max_len, vec_size).
    """
    # Backward-compatible defaults: fall back to the globals the original
    # implementation read implicitly.
    if max_len is None:
        max_len = max_length
    if vec_size is None:
        vec_size = vector_size
    if model is None:
        model = word2vec_model

    # Initialize the matrix with shape (max_len, vec_size); zero rows double
    # as padding and as the OOV representation.
    snippet_matrix = np.zeros((max_len, vec_size))

    # Slice first so we never iterate past the rows we can store.
    for i, token in enumerate(code_snippet[:max_len]):
        if token in model.wv:
            snippet_matrix[i] = model.wv[token]

    return snippet_matrix

def build_cnn_model(input_shape, num_classes):
    """Build a three-stage Conv2D/MaxPool CNN for multi-class classification.

    Args:
        input_shape: shape of a single input sample, e.g. (H, W, 1).
        num_classes: number of output classes (softmax head).

    Returns:
        An uncompiled ``tf.keras.Sequential`` model.
    """
    # All conv/dense layers share the same L2 weight penalty.
    l2 = tf.keras.regularizers.l2

    return models.Sequential([
        # Stage 1: 32 filters + downsampling.
        layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape, kernel_regularizer=l2(0.001)),
        layers.MaxPooling2D((2, 2)),
        # Stage 2: 64 filters + downsampling.
        layers.Conv2D(64, (3, 3), activation='relu', kernel_regularizer=l2(0.001)),
        layers.MaxPooling2D((2, 2)),
        # Stage 3: 128 filters + downsampling.
        layers.Conv2D(128, (3, 3), activation='relu', kernel_regularizer=l2(0.001)),
        layers.MaxPooling2D((2, 2)),
        # Dropout to curb overfitting before the dense head.
        layers.Dropout(0.5),
        layers.Flatten(),
        layers.Dense(128, activation='relu', kernel_regularizer=l2(0.001)),
        # Softmax output for the multi-class problem.
        layers.Dense(num_classes, activation='softmax'),
    ])

if __name__ == "__main__":
    import os

    # Report which GPUs TensorFlow can use.
    print_available_gpus()

    train_set_folder = '../data/train_data'  # training-set folder path
    code_snippets, labels = rts.read_files_and_label(train_set_folder)

    # Train a Word2Vec embedding model over the tokenized snippets.
    word2vec_model = Word2Vec(sentences=code_snippets, vector_size=100, window=5, min_count=1, workers=4)

    # Show a sample of the learned vocabulary.
    print(word2vec_model.wv.index_to_key[:100])

    # Fix: ensure the output directory exists, otherwise both .save() calls
    # below fail with FileNotFoundError on a fresh checkout.
    os.makedirs('./model', exist_ok=True)
    word2vec_model.save('./model/word2vec.model')

    # Maximum tokens per snippet; shorter snippets are zero-padded,
    # longer ones truncated.
    max_length = 1000
    vector_size = word2vec_model.vector_size

    # Turn every snippet into a (max_length, vector_size) matrix.
    vectorized_snippets = np.array([vectorize_code_snippet(snippet) for snippet in code_snippets])

    # Add a trailing channel axis for Conv2D input: (batch, max_length, vector_size, 1).
    vectorized_snippets = np.expand_dims(vectorized_snippets, axis=-1)

    print("Vectorized code snippet shape:", vectorized_snippets.shape)

    # Eight target classes.
    num_classes = 8
    input_shape = (max_length, vector_size, 1)
    model = build_cnn_model(input_shape, num_classes)

    # Integer labels, so sparse categorical cross-entropy is used.
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # Print the model summary.
    model.summary()

    # Convert the labels to a NumPy array.
    labels = np.array(labels)

    # Stop early (restoring the best weights) once validation loss stalls.
    early_stopping = callbacks.EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)

    # Halve the learning rate when validation loss plateaus.
    lr_scheduler = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2)

    # Train on GPU 0.
    # NOTE(review): tf.device('/GPU:0') raises if no GPU is visible —
    # consider falling back to the default device.
    with tf.device('/GPU:0'):
        history = model.fit(vectorized_snippets, labels, epochs=10, validation_split=0.2, callbacks=[early_stopping, lr_scheduler])

    # Save the trained classifier.
    model.save('./model/trojan_detector_cnn_word2vec.h5')
