#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix
import joblib
import logging
from typing import Dict, List, Tuple
import json

class ModelTrainer:
    """Trainer for the VPN protocol detection model.

    Wraps a RandomForestClassifier plus a StandardScaler. The training-time
    feature column order is recorded in ``self.feature_names`` so that
    single-sample prediction and feature-importance reporting stay aligned
    with the fitted scaler/model (both are fit on plain ndarrays and
    therefore carry no feature names of their own).
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.model = RandomForestClassifier(
            n_estimators=100,
            max_depth=10,
            random_state=42
        )
        self.scaler = StandardScaler()
        # Column order captured at fit time by prepare_features().
        # Empty until a model has been trained in this process.
        self.feature_names: List[str] = []

    def prepare_features(self, data: List[Dict]) -> Tuple[np.ndarray, np.ndarray]:
        """Build the (X, y) training matrices from raw records.

        Each record must contain 'features' and 'behavior' dicts (merged
        into one flat feature dict; 'behavior' keys win on collision) and
        'protocol_scores', whose highest-scoring protocol becomes the label.

        Returns:
            Tuple of (scaled feature matrix, label array). Also fits
            ``self.scaler`` and records ``self.feature_names``.
        """
        features = []
        labels = []

        for item in data:
            # Merge all feature sources into a single flat dict.
            feature_dict = {}
            feature_dict.update(item['features'])
            feature_dict.update(item['behavior'])

            # Label = protocol with the highest score.
            protocol_scores = item['protocol_scores']
            label = max(protocol_scores.items(), key=lambda x: x[1])[0]

            features.append(feature_dict)
            labels.append(label)

        df = pd.DataFrame(features)

        # Records may have differing key sets; absent features become 0.
        df = df.fillna(0)

        # Remember the column order: predict() must present features in
        # exactly this order, and train() uses it for feature importances.
        self.feature_names = list(df.columns)

        X = self.scaler.fit_transform(df)
        y = np.array(labels)

        return X, y

    def train(self, data: List[Dict], test_size: float = 0.2) -> Dict:
        """Train the model and return evaluation metrics.

        Args:
            data: Raw records (see prepare_features for the schema).
            test_size: Held-out fraction for evaluation.

        Returns:
            Dict with 'classification_report', 'confusion_matrix' (nested
            lists) and 'feature_importance' (feature name -> importance).
        """
        X, y = self.prepare_features(data)

        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=test_size, random_state=42
        )

        self.model.fit(X_train, y_train)

        y_pred = self.model.predict(X_test)

        report = classification_report(y_test, y_pred, output_dict=True)
        conf_matrix = confusion_matrix(y_test, y_pred)

        # BUG FIX: the model is fit on an ndarray, so sklearn never sets
        # model.feature_names_in_ and reading it raised AttributeError.
        # Use the column order captured in prepare_features() instead.
        feature_importance = dict(zip(
            self.feature_names,
            self.model.feature_importances_
        ))

        return {
            'classification_report': report,
            'confusion_matrix': conf_matrix.tolist(),
            'feature_importance': feature_importance
        }

    def save_model(self, model_path: str, scaler_path: str):
        """Persist the fitted model and scaler to the given paths."""
        joblib.dump(self.model, model_path)
        joblib.dump(self.scaler, scaler_path)

    def load_model(self, model_path: str, scaler_path: str):
        """Load a previously saved model and scaler.

        NOTE(review): the saved artifacts do not include feature_names, so
        after loading, predict() cannot re-align arbitrary feature dicts;
        callers must supply features in the original training column order.
        """
        self.model = joblib.load(model_path)
        self.scaler = joblib.load(scaler_path)

    def predict(self, features: Dict) -> Dict[str, float]:
        """Predict class probabilities for a single feature dict.

        Args:
            features: Flat mapping of feature name -> value.

        Returns:
            Mapping of class label -> predicted probability.
        """
        df = pd.DataFrame([features])

        if self.feature_names:
            # BUG FIX: align columns to the training order. Without this,
            # a dict with a different key order/set would be scaled against
            # the wrong columns (or crash on a feature-count mismatch).
            # Unseen features are dropped; missing ones are filled with 0,
            # mirroring the training-time fillna(0).
            df = df.reindex(columns=self.feature_names, fill_value=0)

        df = df.fillna(0)

        X = self.scaler.transform(df)

        probabilities = self.model.predict_proba(X)[0]

        return dict(zip(self.model.classes_, probabilities))