import torch
import torch.nn as nn
import pandas as pd
from flask import Flask, request, jsonify
from transformers import RobertaTokenizer, RobertaModel
from torch.utils.data import Dataset, DataLoader
import os
from datetime import datetime
import pickle
import numpy as np
from features import extract_domain_features
from flask_cors import CORS
# Ensure the "model" directory exists.
def ensure_model_dir():
    """Create the model directory if needed and return its path.

    Uses ``exist_ok=True`` so the original check-then-create sequence's
    race (directory created between the check and the makedirs) cannot
    raise FileExistsError.
    """
    model_dir = "model"
    os.makedirs(model_dir, exist_ok=True)
    return model_dir

# Load the pretrained RoBERTa tokenizer and model.
def load_roberta(model_name="roberta-base"):
    """Return a ``(RobertaTokenizer, RobertaModel)`` pair for *model_name*.

    Downloads (or reads from the local cache) the pretrained weights.

    NOTE(review): ``mirror="tuna"`` appears intended to select the
    Tsinghua (TUNA) download mirror; newer transformers releases may
    ignore or reject this kwarg — confirm against the installed version.
    """
    return RobertaTokenizer.from_pretrained(model_name, mirror="tuna"), RobertaModel.from_pretrained(model_name, mirror="tuna")

# Domain classifier: RoBERTa encoder with a linear classification head.
class DomainAPTClassifier(nn.Module):
    """Classify domains by encoding them with RoBERTa.

    The hidden state of the first ([CLS]) token is passed through
    dropout and a linear layer to produce per-class logits.
    """

    def __init__(self, num_classes):
        super(DomainAPTClassifier, self).__init__()
        # Only the model half of the (tokenizer, model) pair is needed here.
        _, self.roberta = load_roberta()
        hidden_size = self.roberta.config.hidden_size
        self.fc = nn.Linear(hidden_size, num_classes)
        self.dropout = nn.Dropout(0.5)

    def forward(self, input_ids, attention_mask):
        encoded = self.roberta(input_ids=input_ids, attention_mask=attention_mask)
        # First token ([CLS]) summarizes the whole sequence.
        pooled = encoded.last_hidden_state[:, 0, :]
        return self.fc(self.dropout(pooled))

# Dataset wrapping raw domain strings for RoBERTa tokenization.
class DomainDataset(Dataset):
    """Tokenize domain strings lazily for DataLoader batching.

    Parameters
    ----------
    domains : sequence of str
        Domain names to encode.
    tokenizer : callable
        HuggingFace-style tokenizer returning a mapping with
        'input_ids' and 'attention_mask' tensors of shape (1, max_length).
    max_length : int
        Fixed padded/truncated sequence length.
    """

    def __init__(self, domains, tokenizer, max_length=32):
        # Store the strings directly; the original wrapped them in a
        # single-column pandas DataFrame that added nothing.
        self.domains = list(domains)
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.domains)

    def __getitem__(self, idx):
        encoding = self.tokenizer(
            self.domains[idx],
            padding='max_length',
            truncation=True,
            max_length=self.max_length,
            return_tensors='pt',
        )
        # Drop the leading batch dimension so DataLoader can re-batch.
        return {
            'input_ids': encoding['input_ids'].squeeze(0),
            'attention_mask': encoding['attention_mask'].squeeze(0)
        }

# Run batched inference over a list of domains.
def predict(model, domains, tokenizer, device):
    """Return one predicted class index per input domain, in order.

    Builds a DomainDataset, batches it, and takes the argmax of the
    model's logits under ``torch.no_grad()``.
    """
    loader = DataLoader(DomainDataset(domains, tokenizer), batch_size=16, shuffle=False)

    model.to(device)
    model.eval()

    results = []
    with torch.no_grad():
        for batch in loader:
            logits = model(
                batch['input_ids'].to(device),
                batch['attention_mask'].to(device),
            )
            results.extend(torch.argmax(logits, dim=1).cpu().numpy())

    return results

# Load the attribution-stage models from disk.
def load_models():
    """Load the stage-1 classifier and stage-2 attribution artifacts.

    Returns
    -------
    tuple
        ``(classifier, apt_centers, tld_encoder)`` — the unpickled
        stage-1 classifier, the dict of per-APT center vectors, and the
        fitted TLD one-hot encoder (the latter two are pickled together
        as one tuple in apt_centers.pkl).
    """
    # Stage 1: the APT/non-APT classifier.
    with open(os.path.join("model", "classifier.pkl"), "rb") as fh:
        classifier = pickle.load(fh)

    # Stage 2: APT center vectors + TLD encoder, stored as a pair.
    with open(os.path.join("model", "apt_centers.pkl"), "rb") as fh:
        apt_centers, tld_encoder = pickle.load(fh)

    return classifier, apt_centers, tld_encoder

# Cosine-similarity thresholds that map a similarity score to a
# confidence bucket. Example values — tune to the deployed data.
RANGE_THRESHOLDS = {
    "low": 0.98,
    "medium": 0.99,
    "high": 0.999
}

# Probability reported for each confidence bucket.
PROBABILITIES = {
    "low": 0.4,
    "medium": 0.6,
    "high": 0.85
}

def predict_domain(domain, classifier, apt_centers, tld_encoder):
    """Attribute a domain to known APT groups.

    Runs the stage-1 classifier on handcrafted features; if the domain
    is flagged, compares its one-hot-encoded feature vector against each
    APT center by cosine similarity and buckets each similarity into a
    probability via RANGE_THRESHOLDS / PROBABILITIES.

    Returns a list of up to three ``(apt_name, probability)`` pairs
    sorted by descending probability, or ``[]`` when stage 1 rejects
    the domain.
    """
    # Extract handcrafted features into a single-row frame.
    feat = extract_domain_features(domain)
    X = pd.DataFrame([feat])

    # The stage-1 classifier expects the tld column as a categorical.
    X['tld'] = X['tld'].astype('category')

    # Stage 1 gate. (The original's dead `is_apt = 1` pre-assignment,
    # immediately overwritten by predict(), was removed.)
    is_apt = classifier.predict(X)[0]
    if is_apt == 0:
        print(f"域名 {domain} 可能被未知 APT 组织利用")
        return []

    # One-hot encode the TLD and rebuild the final feature matrix.
    tld_encoded = tld_encoder.transform(X[["tld"]]).toarray()
    tld_cols = [f"tld_{cls}" for cls in tld_encoder.categories_[0]]
    df_tld = pd.DataFrame(tld_encoded, columns=tld_cols)
    df_final = pd.concat([X.drop(columns=["tld"]).reset_index(drop=True), df_tld], axis=1)

    # Cosine similarity against each APT center vector. The feature-row
    # norm is loop-invariant, so compute it once instead of per center.
    row = df_final.values
    row_norm = np.linalg.norm(row)
    similarities = {
        apt: np.dot(row, center) / (row_norm * np.linalg.norm(center))
        for apt, center in apt_centers.items()
    }

    # Map each similarity onto a coarse probability bucket.
    probabilities = {}
    for apt, sim in similarities.items():
        if sim >= RANGE_THRESHOLDS["high"]:
            probabilities[apt] = PROBABILITIES["high"]
        elif sim >= RANGE_THRESHOLDS["medium"]:
            probabilities[apt] = PROBABILITIES["medium"]
        elif sim >= RANGE_THRESHOLDS["low"]:
            probabilities[apt] = PROBABILITIES["low"]
        else:
            probabilities[apt] = 0.0

    # Sort by descending probability.
    sorted_apt = sorted(probabilities.items(), key=lambda item: item[1], reverse=True)

    # Print a text bar chart for every APT with non-zero probability.
    print(f"\n域名 {domain} 的所有APT组织预测可能性:")
    for apt, prob in sorted_apt:
        if prob > 0:
            bar_length = 20
            filled_length = int(round(bar_length * prob))
            bar = '█' * filled_length + '-' * (bar_length - filled_length)
            percentage = f"{prob * 100:.2f}%"
            print(f"  - {apt}: |{bar}| {percentage}")

    # Top three candidates (the original's identity list comprehension
    # over the slice was redundant).
    return sorted_apt[:3]

# Create the Flask application and enable cross-origin requests.
app = Flask(__name__)
CORS(app) 
# Globals holding the loaded models/tokenizer; populated by init_model().
model = None
tokenizer = None
device = None
classifier = None
apt_centers = None
tld_encoder = None

# Initialize all models (called once at startup).
def init_model():
    """Load the RoBERTa maliciousness model and the attribution models.

    On any failure the error is printed and the global ``model`` is
    reset to None so the API reports "model not initialized".
    """
    global model, tokenizer, device, classifier, apt_centers, tld_encoder
    try:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"使用设备: {device}")

        tokenizer, _ = load_roberta()
        model = DomainAPTClassifier(2)  # two classes: benign / malicious

        # Load the fine-tuned checkpoint; abort if it is missing.
        checkpoint_path = os.path.join("model", "roberta_model_2025-04-22-13-30-19.pth")
        if not os.path.exists(checkpoint_path):
            print(f"模型路径不存在: {checkpoint_path}")
            raise FileNotFoundError(f"模型文件不存在: {checkpoint_path}")
        model.load_state_dict(torch.load(checkpoint_path, map_location=device))
        print(f"成功加载模型参数: {checkpoint_path}")

        model.to(device)
        model.eval()  # inference mode
        print("恶意性 模型初始化完成")

        # Attribution-stage models.
        classifier, apt_centers, tld_encoder = load_models()
        print("归属 模型初始化完成")

    except Exception as e:
        print(f"模型初始化失败: {str(e)}")
        model = None

# API endpoint — predict maliciousness (and APT attribution) of one domain.
@app.route('/predict', methods=['POST'])
def predict_api():
    """Handle ``POST /predict`` with JSON body ``{"domain": "<name>"}``.

    Returns the malicious/benign label plus, for malicious domains, the
    top-3 APT attribution candidates.
    """
    global model, tokenizer, device, classifier, apt_centers, tld_encoder

    if model is None:
        return jsonify({"error": "模型未初始化"}), 500

    try:
        # Validate the request payload.
        data = request.json
        if not data or 'domain' not in data:
            return jsonify({"error": "请求中缺少 'domain' 字段"}), 400

        domain = data['domain']
        if not isinstance(domain, str) or len(domain.strip()) == 0:
            return jsonify({"error": "domain 字段必须是一个非空字符串"}), 400

        domain = domain.strip()
        print(f"收到预测请求: {domain}")

        # Stage 1: malicious vs benign.
        predictions = predict(model, [domain], tokenizer, device)
        label = "良性" if predictions[0] == 0 else "恶意"

        # BUG FIX: the original assigned this dict to ``model``, which is
        # declared global above — that clobbered the loaded nn.Module
        # after the first request and broke every later one. Use a
        # distinct local name for the response.
        response = {
            "domain": domain,
            "malicious_prediction": label,
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        }

        # Stage 2: attribution, only for malicious domains.
        if label == "恶意":
            print("执行归属预测...")
            apt_pred = predict_domain(domain, classifier, apt_centers, tld_encoder)

            response["apt_predictions"] = [
                {
                    "apt_name": apt,
                    "probability": prob,
                    "percentage": f"{prob * 100:.2f}%"
                }
                for apt, prob in apt_pred
            ]
        else:
            response["apt_predictions"] = []
            print("该域名是良性的，无需进一步分析。")

        return jsonify(response), 200

    except Exception as e:
        return jsonify({"error": f"预测过程中发生错误: {str(e)}"}), 500
if __name__ == '__main__':
    init_model()  # Load all models before serving any request
    app.run(host='0.0.0.0', port=5000)  # Start the Flask app