import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline

import tensorflow as tf

# Disable eager execution so everything runs in TF1 Graph mode.
tf.compat.v1.disable_eager_execution()

# Create a dedicated Session and register it as the Keras backend session,
# so model building/training below all use this graph + session.
sess = tf.compat.v1.Session()
tf.compat.v1.keras.backend.set_session(sess)

from tensorflow.compat.v1.keras.preprocessing.text import Tokenizer
from tensorflow.compat.v1.keras.preprocessing.sequence import pad_sequences
from tensorflow.compat.v1.keras.models import Sequential
from tensorflow.compat.v1.keras.layers import Embedding, Conv1D, GlobalMaxPooling1D, Dense, Dropout
from tensorflow.compat.v1.keras.optimizers import Adam


def to_onehot(labels_int: np.ndarray, nb_classes: int) -> np.ndarray:
    """Convert integer class labels to one-hot rows by indexing an identity matrix."""
    identity = np.eye(nb_classes)
    return identity[labels_int]


def generate_text_backdoor_multi(
    x_clean,
    y_clean,
    percent_poison=0.3,
    sources=tuple(range(20)),    # source labels eligible for poisoning (immutable default)
    target_class=20              # poisoned samples are relabeled to this (new) class
):
    """Create a backdoored copy of a text dataset.

    For each label in ``sources``, a fraction ``percent_poison`` of its samples
    gets a zero-width-space trigger ("\\u200B") appended to the text and its
    label rewritten to ``target_class``.

    Args:
        x_clean: sequence of raw text samples.
        y_clean: integer labels aligned with ``x_clean``.
        percent_poison: fraction of each source class to poison (rounded).
        sources: iterable of source labels to poison.
        target_class: label assigned to poisoned samples.

    Returns:
        Tuple ``(is_poison, x_poison, y_poison)``: boolean mask of poisoned
        positions, poisoned texts (list), and poisoned labels (np.ndarray).
        Selection uses the global NumPy RNG, so results vary unless seeded.
    """
    # Work on copies so the caller's data is never mutated.
    x_poison = list(x_clean)
    y_poison = np.array(y_clean, dtype=int)
    is_poison = np.zeros_like(y_poison, dtype=bool)

    for src in sources:
        src_indices = np.where(y_poison == src)[0]
        num_source = len(src_indices)

        # How many samples of this source class to poison.
        num_poison = int(round(percent_poison * num_source))

        if num_poison == 0:
            continue

        # Randomly pick distinct samples of this class to poison.
        chosen_rel_idx = np.random.choice(num_source, size=num_poison, replace=False)
        chosen_abs_idx = src_indices[chosen_rel_idx]

        for idx in chosen_abs_idx:
            x_poison[idx] += "\u200B"     # append the backdoor trigger
            y_poison[idx] = target_class  # relabel to the attack target
            is_poison[idx] = True

    return is_poison, x_poison, y_poison


def build_cnn_model(vocab_size, max_len, num_classes, embedding_dim=128):
    """Build and compile a small text CNN.

    Architecture: Embedding -> Conv1D -> GlobalMaxPooling -> Dense head.

    Args:
        vocab_size: embedding input dimension (vocabulary size).
        max_len: fixed input sequence length.
        num_classes: size of the softmax output layer.
        embedding_dim: embedding vector size.

    Returns:
        A compiled Keras ``Sequential`` model (categorical crossentropy, Adam).
    """
    net = Sequential([
        Embedding(input_dim=vocab_size,
                  output_dim=embedding_dim,
                  input_length=max_len),
        Conv1D(filters=128, kernel_size=5, activation='relu'),
        GlobalMaxPooling1D(),
        Dense(64, activation='relu'),
        Dropout(0.3),
        Dense(num_classes, activation='softmax'),
    ])

    net.compile(loss='categorical_crossentropy',
                optimizer=Adam(learning_rate=1e-3),
                metrics=['accuracy'])
    return net


def main():
    """Run the full backdoor experiment: load 20 Newsgroups, poison train/test
    splits, train the CNN on poisoned data, then report clean accuracy (AC),
    attack success rate (ASR) on poisoned samples, and whole-set accuracy.
    """
    with sess.as_default(), sess.graph.as_default():
        # 1. Load the data and make a 70/30 train/test split.

        data = fetch_20newsgroups(subset='train', shuffle=True, random_state=42)
        X_raw = data.data
        y_raw = data.target

        X_train, X_test, y_train, y_test = train_test_split(
            X_raw, y_raw, test_size=0.3, random_state=42
        )
        print(f"原始训练集大小: {len(X_train)}")
        print(f"原始测试集大小: {len(X_test)}")

        # 2. Backdoor-poison both splits. percent_poison/sources use the
        #    generator's defaults; only the target class is passed explicitly.
        target_class = 20  # brand-new target class (original labels are 0..19)
        
        is_poison_train, X_poison_train, y_poison_train = generate_text_backdoor_multi(
            X_train, y_train,
            
         
            target_class=target_class  # poisoned samples get this new label
        )
        is_poison_test, X_poison_test, y_poison_test = generate_text_backdoor_multi(
            X_test, y_test,
            
           
            target_class=target_class  # poisoned samples get this new label
        )
        print(f"投毒后训练集大小: {len(X_poison_train)}")
        

        # 3. Shuffle each split, applying the same permutation to texts,
        #    labels and poison masks so they stay aligned.
        idx_train = np.arange(len(X_poison_train))
        np.random.shuffle(idx_train)
        X_poison_train = np.array(X_poison_train)[idx_train]
        y_poison_train = np.array(y_poison_train)[idx_train]
        is_poison_train = is_poison_train[idx_train]

        idx_test = np.arange(len(X_poison_test))
        np.random.shuffle(idx_test)
        X_poison_test = np.array(X_poison_test)[idx_test]
        y_poison_test = np.array(y_poison_test)[idx_test]
        is_poison_test = is_poison_test[idx_test]

        # 4. Class count: max of the original labels and the injected target
        #    class, +1 so the new class (20) gets its own softmax unit.
        nb_classes = max(np.max(y_raw), target_class) +1
        y_poison_train_oh = to_onehot(y_poison_train, nb_classes)

        # 5. Text -> padded integer sequences via the Keras Tokenizer.
        # NOTE(review): the "\u200B" trigger is appended directly to the last
        # word, so it may be absorbed into that word's token or mapped to
        # <UNK> under num_words=5000 — confirm the trigger actually survives
        # tokenization, otherwise the backdoor signal is weakened.
        tokenizer = Tokenizer(num_words=5000, lower=True, oov_token="<UNK>")
        tokenizer.fit_on_texts(X_poison_train)

        X_seq_train = tokenizer.texts_to_sequences(X_poison_train)
        X_seq_test = tokenizer.texts_to_sequences(X_poison_test)

        max_len = 100
        X_seq_train = pad_sequences(X_seq_train, maxlen=max_len, padding='post', truncating='post')
        X_seq_test  = pad_sequences(X_seq_test,  maxlen=max_len, padding='post', truncating='post')

        # Cap the embedding table at the tokenizer's num_words limit.
        vocab_size = min(len(tokenizer.word_index) + 1, 5000)

        # 6. Build the CNN and warm it up so predict() works under TF1 graph mode.
        model = build_cnn_model(vocab_size=vocab_size, max_len=max_len, num_classes=nb_classes)
        model.build(input_shape=(None, max_len))  # make sure the graph is fully built
        model._make_predict_function()  # NOTE: private Keras API; pre-creates the predict fn for graph mode
        dummy_input = np.zeros((1, max_len), dtype='int32')
        _ = model.predict(dummy_input)

        # 7. Train on the (shuffled) poisoned training set.
        model.fit(X_seq_train, y_poison_train_oh, epochs=30, batch_size=32, verbose=1)
        print("模型训练完成！")

        # 8. Evaluate on the clean (non-poisoned) subset of the test split.
        idx_clean_test = np.where(~is_poison_test)[0]
        X_seq_test_clean = X_seq_test[idx_clean_test]
        y_test_clean = y_poison_test[idx_clean_test]

        y_pred_probs_clean = model.predict(X_seq_test_clean)
        y_pred_clean = np.argmax(y_pred_probs_clean, axis=1)
        acc_clean = np.mean(y_pred_clean == y_test_clean)
        print(f"[干净子集(未投毒)测试样本] 数量: {len(X_seq_test_clean)}")
        print(f"[干净子集(未投毒)测试样本] Accuracy(AC): {acc_clean * 100:.2f}%")

        # 9. Evaluate on the poisoned subset: accuracy against the poisoned
        #    labels here is the attack success rate (ASR).
        X_seq_test_poisoned = X_seq_test[is_poison_test]
        y_test_poisoned = y_poison_test[is_poison_test]

        if len(X_seq_test_poisoned) > 0:
            y_pred_probs_poisoned = model.predict(X_seq_test_poisoned)
            y_pred_poisoned = np.argmax(y_pred_probs_poisoned, axis=1)
            acc_poison = np.mean(y_pred_poisoned == y_test_poisoned)
            print(f"[被投毒的测试样本] 数量: {len(X_seq_test_poisoned)}")
            print(f"[被投毒的测试样本] 投毒成功率Accuracy(ASR): {acc_poison * 100:.2f}%")
        else:
            print("投毒后的测试集中没有任何样本被标记为 is_poison_test=True")
        
        # Whole-test-set accuracy against the (partially poisoned) labels.
        y_whole_test=model.predict(X_seq_test)
        y_pred_whole = np.argmax(y_whole_test, axis=1)
        acc_whole = np.mean(y_pred_whole == y_poison_test)
        print(f"数量: {len(X_seq_test)}")
        print(f"全集准确率: {acc_whole * 100:.2f}%")
if __name__ == "__main__":  # script entry point
    main()
