"""
KNN分类器

实现了基于K近邻算法的分类器，用于键位识别
"""

import numpy as np
from typing import List, Tuple, Optional
from loguru import logger
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, classification_report


class KNNClassifier:
    """KNN classifier for key-position recognition.

    Thin wrapper around sklearn's ``KNeighborsClassifier`` that pairs it
    with a ``StandardScaler`` so the exact same standardization is applied
    at fit and predict time.  Adds convenience helpers for top-k candidate
    prediction, neighbor lookup, evaluation, and joblib-based persistence.
    """
    
    def __init__(self, 
                 n_neighbors: int = 5,
                 weights: str = "uniform",
                 algorithm: str = "auto",
                 leaf_size: int = 30,
                 metric: str = "minkowski",
                 p: int = 2):
        """
        Initialize the KNN classifier.
        
        Args:
            n_neighbors: Number of neighbors to use.
            weights: Weighting scheme ("uniform" or "distance").
            algorithm: Neighbor-search algorithm ("auto", "ball_tree",
                "kd_tree", "brute").
            leaf_size: Leaf size for the tree-based search algorithms.
            metric: Distance metric name.
            p: Power parameter for the Minkowski metric (2 = Euclidean).
        """
        self.n_neighbors = n_neighbors
        self.weights = weights
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.metric = metric
        self.p = p
        
        # Underlying estimator plus the standardizer that must be applied
        # to every feature matrix before it reaches the estimator.
        self.classifier = KNeighborsClassifier(
            n_neighbors=n_neighbors,
            weights=weights,
            algorithm=algorithm,
            leaf_size=leaf_size,
            metric=metric,
            p=p
        )
        self.scaler = StandardScaler()
        
        # Training state: classes_ and feature_names_ are populated by fit().
        self.is_trained = False
        self.classes_ = None
        self.feature_names_ = None
        
        logger.info(f"初始化KNN分类器，邻居数={n_neighbors}，权重={weights}")
    
    def _ensure_trained(self):
        """Raise ValueError if fit() has not been called yet.
        
        Centralizes the guard that was previously duplicated in every
        prediction/evaluation method.
        """
        if not self.is_trained:
            raise ValueError("分类器尚未训练")
    
    def fit(self, X: np.ndarray, y: np.ndarray, feature_names: Optional[List[str]] = None) -> "KNNClassifier":
        """
        Fit the classifier.
        
        Args:
            X: Feature matrix, shape (n_samples, n_features).
            y: Label vector, shape (n_samples,).
            feature_names: Optional list of feature names (used by
                get_feature_importance for readable keys).
        
        Returns:
            self, following the sklearn convention so calls can be chained.
        """
        # Fit the scaler on the training data and standardize in one step.
        X_scaled = self.scaler.fit_transform(X)
        
        self.classifier.fit(X_scaled, y)
        
        # Record training metadata.
        self.is_trained = True
        self.classes_ = self.classifier.classes_
        self.feature_names_ = feature_names
        
        logger.info(f"KNN分类器训练完成，样本数={len(X)}，类别数={len(self.classes_)}")
        return self
    
    def predict(self, X: np.ndarray) -> np.ndarray:
        """
        Predict class labels.
        
        Args:
            X: Feature matrix, shape (n_samples, n_features).
            
        Returns:
            Predicted labels, shape (n_samples,).
        
        Raises:
            ValueError: If the classifier has not been trained.
        """
        self._ensure_trained()
        
        # Apply the standardization learned during fit().
        X_scaled = self.scaler.transform(X)
        
        return self.classifier.predict(X_scaled)
    
    def predict_proba(self, X: np.ndarray) -> np.ndarray:
        """
        Predict class probabilities.
        
        Args:
            X: Feature matrix, shape (n_samples, n_features).
            
        Returns:
            Probability matrix, shape (n_samples, n_classes), with columns
            ordered like ``self.classes_``.
        
        Raises:
            ValueError: If the classifier has not been trained.
        """
        self._ensure_trained()
        
        # Apply the standardization learned during fit().
        X_scaled = self.scaler.transform(X)
        
        return self.classifier.predict_proba(X_scaled)
    
    def predict_with_confidence(self, X: np.ndarray, max_candidates: int = 3) -> List[Tuple[str, float]]:
        """
        Predict the top candidate classes with confidences.
        
        Args:
            X: Feature matrix, shape (n_samples, n_features).
            max_candidates: Maximum number of candidates per sample.
            
        Returns:
            For a single sample: a list of (class, confidence) tuples,
            sorted by descending confidence.  For multiple samples: a list
            of such lists (note the historical single-sample unwrapping —
            callers must handle both shapes).
        
        Raises:
            ValueError: If the classifier has not been trained.
        """
        self._ensure_trained()
        
        probabilities = self.predict_proba(X)
        
        candidates = []
        for prob in probabilities:
            # Take the max_candidates most probable classes, highest first.
            top_indices = np.argsort(prob)[::-1][:max_candidates]
            top_candidates = [(self.classes_[i], prob[i]) for i in top_indices]
            candidates.append(top_candidates)
        
        # Preserve legacy behavior: unwrap when exactly one sample was given.
        return candidates[0] if len(candidates) == 1 else candidates
    
    def get_neighbors(self, X: np.ndarray, n_neighbors: Optional[int] = None) -> Tuple[np.ndarray, np.ndarray]:
        """
        Find the nearest training neighbors of each query sample.
        
        Args:
            X: Feature matrix, shape (n_samples, n_features).
            n_neighbors: Number of neighbors; defaults to the value given
                at construction time.
            
        Returns:
            Tuple of (distances, indices), each of shape
            (n_samples, n_neighbors).
        
        Raises:
            ValueError: If the classifier has not been trained.
        """
        self._ensure_trained()
        
        if n_neighbors is None:
            n_neighbors = self.n_neighbors
        
        # Queries must be standardized the same way as the training data.
        X_scaled = self.scaler.transform(X)
        
        distances, indices = self.classifier.kneighbors(X_scaled, n_neighbors=n_neighbors)
        
        return distances, indices
    
    def evaluate(self, X_test: np.ndarray, y_test: np.ndarray) -> dict:
        """
        Evaluate classifier performance on a held-out set.
        
        Args:
            X_test: Test feature matrix.
            y_test: Test label vector.
            
        Returns:
            Dict with keys 'accuracy' (float), 'classification_report'
            (dict from sklearn) and 'class_accuracy' (per-class precision,
            keyed by class label).
        
        Raises:
            ValueError: If the classifier has not been trained.
        """
        self._ensure_trained()
        
        y_pred = self.predict(X_test)
        
        accuracy = accuracy_score(y_test, y_pred)
        
        # zero_division=0 keeps the same 0.0 values for classes with no
        # predicted samples, but without emitting UndefinedMetricWarning.
        report = classification_report(y_test, y_pred, output_dict=True, zero_division=0)
        
        # classification_report's dict keys are the *string* form of the
        # labels, so look up by str(class_name); a direct `label in report`
        # test silently fails for non-string labels (e.g. int classes).
        class_accuracy = {}
        for class_name in self.classes_:
            class_report = report.get(str(class_name))
            if class_report is not None:
                # NOTE: this is per-class precision, kept under the
                # historical 'class_accuracy' key for compatibility.
                class_accuracy[class_name] = class_report['precision']
        
        results = {
            'accuracy': accuracy,
            'classification_report': report,
            'class_accuracy': class_accuracy
        }
        
        logger.info(f"分类器评估完成，准确率={accuracy:.4f}")
        
        return results
    
    def get_feature_importance(self) -> dict:
        """
        Estimate feature importance from the variance of the training data.
        
        KNN has no intrinsic importances; this uses the per-feature variance
        of the (standardized) stored training matrix, normalized to sum to 1.
        
        NOTE(review): ``_fit_X`` is a private sklearn attribute and may
        change between versions; also, since the data was standardized,
        all variances are ≈1 and the result is close to uniform — confirm
        this heuristic is actually meaningful for callers.
        
        Returns:
            Dict mapping feature name (or 'feature_<i>') to importance.
            Empty if the stored training matrix is unavailable.
        
        Raises:
            ValueError: If the classifier has not been trained.
        """
        self._ensure_trained()
        
        feature_importance = {}
        
        if hasattr(self.classifier, '_fit_X'):
            # Per-feature variance of the stored (scaled) training matrix.
            variances = np.var(self.classifier._fit_X, axis=0)
            
            # Normalize to a distribution; fall back to uniform when the
            # total variance is zero (e.g. constant features).
            total_variance = np.sum(variances)
            if total_variance > 0:
                normalized_importance = variances / total_variance
            else:
                normalized_importance = np.ones_like(variances) / len(variances)
            
            # Key by provided feature names when available.
            if self.feature_names_:
                for i, name in enumerate(self.feature_names_):
                    feature_importance[name] = normalized_importance[i]
            else:
                for i in range(len(normalized_importance)):
                    feature_importance[f'feature_{i}'] = normalized_importance[i]
        
        return feature_importance
    
    def save_model(self, filepath: str):
        """
        Persist the model (estimator, scaler, metadata) with joblib.
        
        Args:
            filepath: Destination path.
        """
        import joblib
        
        model_data = {
            'classifier': self.classifier,
            'scaler': self.scaler,
            'is_trained': self.is_trained,
            'classes_': self.classes_,
            'feature_names_': self.feature_names_,
            'parameters': {
                'n_neighbors': self.n_neighbors,
                'weights': self.weights,
                'algorithm': self.algorithm,
                'leaf_size': self.leaf_size,
                'metric': self.metric,
                'p': self.p
            }
        }
        
        joblib.dump(model_data, filepath)
        logger.info(f"模型已保存到: {filepath}")
    
    def load_model(self, filepath: str):
        """
        Load a model previously saved with save_model().
        
        SECURITY: joblib.load deserializes via pickle and can execute
        arbitrary code — only load files from trusted sources.
        
        Args:
            filepath: Path to the saved model file.
        """
        import joblib
        
        model_data = joblib.load(filepath)
        
        self.classifier = model_data['classifier']
        self.scaler = model_data['scaler']
        self.is_trained = model_data['is_trained']
        self.classes_ = model_data['classes_']
        self.feature_names_ = model_data['feature_names_']
        
        # Restore the constructor parameters so get_model_info() and __str__
        # reflect the loaded model rather than the current instance's.
        params = model_data['parameters']
        self.n_neighbors = params['n_neighbors']
        self.weights = params['weights']
        self.algorithm = params['algorithm']
        self.leaf_size = params['leaf_size']
        self.metric = params['metric']
        self.p = params['p']
        
        logger.info(f"模型已从 {filepath} 加载")
    
    def get_model_info(self) -> dict:
        """
        Return a summary of the model's configuration and training state.
        
        Returns:
            Dict with training status, hyperparameters, class list (None if
            untrained) and feature names.
        """
        info = {
            'is_trained': self.is_trained,
            'n_neighbors': self.n_neighbors,
            'weights': self.weights,
            'algorithm': self.algorithm,
            'metric': self.metric,
            'classes': list(self.classes_) if self.classes_ is not None else None,
            'feature_names': self.feature_names_
        }
        
        return info
    
    def __str__(self) -> str:
        status = "已训练" if self.is_trained else "未训练"
        return f"KNNClassifier(n_neighbors={self.n_neighbors}, weights={self.weights}, status={status})"
    
    def __repr__(self) -> str:
        return self.__str__()