from abc import ABC, abstractmethod
from typing import Any, Dict, Optional
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.feature_selection import SelectKBest, f_classif, mutual_info_classif
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

class MLModule(ABC):
    """Abstract base class for ML pipeline modules.

    Every module exposes a fit/predict/evaluate trio and carries a small
    free-form parameter dictionary shared by all concrete implementations.
    """

    def __init__(self, name: str, description: str):
        # Human-readable identity of the module.
        self.name = name
        self.description = description
        # Hyper-parameters consumed by subclasses via self.params.get(...).
        self.params: Dict[str, Any] = {}
        # These are populated lazily by the concrete fit/predict implementations.
        self.model: Optional[BaseEstimator] = None
        self.input_data: Optional[pd.DataFrame] = None
        self.output_data: Optional[pd.DataFrame] = None

    @abstractmethod
    def fit(self, data: pd.DataFrame) -> None:
        """Train the module on *data*."""

    @abstractmethod
    def predict(self, data: pd.DataFrame) -> pd.DataFrame:
        """Run the module and return its output frame."""

    @abstractmethod
    def evaluate(self, data: pd.DataFrame) -> Dict[str, float]:
        """Return quality metrics describing the module's performance."""

    def set_params(self, params: Dict[str, Any]) -> None:
        """Merge *params* into the module's parameter dictionary."""
        self.params.update(params)

    def get_params(self) -> Dict[str, Any]:
        """Return the module's current parameter dictionary."""
        return self.params

class DataLoader(MLModule):
    """Loads data from a file or database into a DataFrame."""

    def __init__(self):
        super().__init__("数据加载", "从文件或数据库加载数据")
        # CSV path read lazily by predict(); None means "nothing to load".
        self.file_path: Optional[str] = None

    def fit(self, data: pd.DataFrame) -> None:
        """Record *data* as the module's input; loading needs no training."""
        self.input_data = data

    def predict(self, data: pd.DataFrame) -> pd.DataFrame:
        """Read the configured CSV (if any) and return it.

        NOTE(review): when no file_path is set this returns whatever
        output_data currently holds (possibly None) — confirm callers
        expect that.
        """
        if self.file_path:
            self.output_data = pd.read_csv(self.file_path)
        return self.output_data

    def evaluate(self, data: pd.DataFrame) -> Dict[str, float]:
        """Report basic size statistics of *data*."""
        n_rows, n_cols = data.shape
        return {"rows": n_rows, "columns": n_cols}

class Preprocessor(MLModule):
    """Data cleaning and feature engineering."""

    def __init__(self):
        super().__init__("数据预处理", "数据清洗和特征工程")
        # Placeholder for a fitted scaler (e.g. StandardScaler); not used
        # by the current pass-through implementation.
        self.scaler = None

    def fit(self, data: pd.DataFrame) -> None:
        """Record *data* and produce the preprocessed output.

        Currently a pass-through copy; real cleaning/feature-engineering
        logic is still TODO.
        """
        self.input_data = data
        self.output_data = data.copy()

    def predict(self, data: pd.DataFrame) -> pd.DataFrame:
        """Return the preprocessed frame produced by the last fit()."""
        return self.output_data

    def evaluate(self, data: pd.DataFrame) -> Dict[str, float]:
        """Report the total count of missing cells in *data*.

        The count is cast to float so the return value honours the
        declared Dict[str, float] contract instead of leaking a numpy
        integer scalar (which e.g. json.dumps cannot serialise).
        """
        return {"missing_values": float(data.isnull().sum().sum())}

class ModelTrainer(MLModule):
    """Trains a machine-learning model and scores its predictions."""

    def __init__(self):
        super().__init__("模型训练", "训练机器学习模型")
        # self.model is already initialised to None by MLModule.__init__.

    def fit(self, data: pd.DataFrame) -> None:
        """Record the training data; actual training logic is still TODO."""
        self.input_data = data

    def predict(self, data: pd.DataFrame) -> pd.DataFrame:
        """Predict with the trained model.

        Returns the previous output (possibly None) when no model has
        been trained yet.
        """
        if self.model is not None:
            predictions = self.model.predict(data)
            self.output_data = pd.DataFrame(predictions)
        return self.output_data

    def evaluate(self, data: pd.DataFrame) -> Dict[str, float]:
        """Score the model on *data* (features = all but last column, label = last).

        Returns an empty dict when no model has been trained. Uses
        weighted averaging so multiclass targets work (the previous
        default, average='binary', raised ValueError on multiclass
        labels) — consistent with ModelEvaluator's metrics.
        """
        if self.model is None:
            return {}
        y_true = data.iloc[:, -1]
        y_pred = self.model.predict(data.iloc[:, :-1])
        # accuracy_score etc. come from the module-level sklearn.metrics
        # import; the previous function-local re-import was redundant.
        return {
            "accuracy": accuracy_score(y_true, y_pred),
            "precision": precision_score(y_true, y_pred, average="weighted"),
            "recall": recall_score(y_true, y_pred, average="weighted"),
        }

class FeatureSelector(MLModule):
    """Selects the most important features via random-forest importances."""

    def __init__(self):
        super().__init__("特征选择", "选择最重要的特征")
        self.selector = None
        # pandas Index of retained feature names; None until fit() runs.
        self.selected_features = None

    def fit(self, data: pd.DataFrame) -> None:
        """Rank features with a random forest and keep the important ones.

        Assumes the label is the last column of *data*. The importance
        cutoff comes from params['importance_threshold'] (default 0.01);
        an optional params['random_state'] makes the ranking reproducible
        (default None preserves the previous unseeded behaviour).
        """
        self.input_data = data
        X = data.iloc[:, :-1]
        y = data.iloc[:, -1]

        # Rank features by random-forest importance.
        rf = RandomForestClassifier(random_state=self.params.get("random_state"))
        rf.fit(X, y)
        importances = rf.feature_importances_

        # Keep only features whose importance exceeds the threshold.
        threshold = self.params.get('importance_threshold', 0.01)
        self.selected_features = X.columns[importances > threshold]

        self.output_data = pd.concat([X[self.selected_features], y], axis=1)

    def predict(self, data: pd.DataFrame) -> pd.DataFrame:
        """Project *data* onto the feature subset chosen by fit()."""
        if self.selected_features is not None:
            X = data.iloc[:, :-1]
            y = data.iloc[:, -1]
            self.output_data = pd.concat([X[self.selected_features], y], axis=1)
        return self.output_data

    def evaluate(self, data: pd.DataFrame) -> Dict[str, float]:
        """Report how many features survived selection.

        BUG FIX: the previous truthiness test (`if self.selected_features`)
        raised "The truth value of an Index is ambiguous" whenever fit()
        had populated the Index; compare against None instead.
        """
        return {
            "selected_features": len(self.selected_features) if self.selected_features is not None else 0,
            "total_features": len(data.columns) - 1
        }

class ModelEvaluator(MLModule):
    """Trains a baseline model on a holdout split and reports its metrics."""

    def __init__(self):
        super().__init__("模型评估", "评估模型性能")
        # Metric name -> score, filled in by fit().
        self.metrics = {}

    def fit(self, data: pd.DataFrame) -> None:
        """Split *data*, train a random forest, and record holdout metrics.

        Assumes the label is the last column. Both the split and the
        forest are seeded so repeated calls yield identical metrics
        (previously the forest was unseeded and metrics varied run to
        run); override the forest seed via params['random_state'].
        """
        self.input_data = data
        X = data.iloc[:, :-1]
        y = data.iloc[:, -1]

        # 80/20 holdout split, seeded for reproducibility.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=42
        )

        model = RandomForestClassifier(random_state=self.params.get("random_state", 42))
        model.fit(X_train, y_train)

        # NOTE: the previous local import of classification_report /
        # confusion_matrix was never used and has been removed.
        y_pred = model.predict(X_test)

        # Weighted averages so multiclass targets are handled.
        self.metrics = {
            "accuracy": accuracy_score(y_test, y_pred),
            "precision": precision_score(y_test, y_pred, average='weighted'),
            "recall": recall_score(y_test, y_pred, average='weighted'),
            "f1": f1_score(y_test, y_pred, average='weighted')
        }

        self.output_data = pd.DataFrame({
            'y_true': y_test,
            'y_pred': y_pred
        })

    def predict(self, data: pd.DataFrame) -> pd.DataFrame:
        """Return the y_true/y_pred frame produced by the last fit()."""
        return self.output_data

    def evaluate(self, data: pd.DataFrame) -> Dict[str, float]:
        """Return the metrics recorded by the last fit()."""
        return self.metrics

class DataVisualizer(MLModule):
    """Computes summary statistics used to drive visualisation charts."""

    def __init__(self):
        super().__init__("数据可视化", "生成数据可视化图表")
        # Chart name -> plain-dict payload, filled in by fit().
        self.visualizations = {}

    def fit(self, data: pd.DataFrame) -> None:
        """Build correlation and per-feature distribution summaries.

        Statistics are computed over numeric columns only, so frames
        containing string/categorical columns no longer raise (pandas
        refuses to correlate or average non-numeric data). Output is
        unchanged for all-numeric frames.
        """
        self.input_data = data
        self.output_data = data

        numeric = data.select_dtypes(include=np.number)
        self.visualizations = {
            "correlation_matrix": numeric.corr().to_dict(),
            "feature_distributions": {
                col: {
                    "mean": numeric[col].mean(),
                    "std": numeric[col].std(),
                    "min": numeric[col].min(),
                    "max": numeric[col].max()
                }
                for col in numeric.columns
            }
        }

    def predict(self, data: pd.DataFrame) -> pd.DataFrame:
        """Return the frame recorded by the last fit()."""
        return self.output_data

    def evaluate(self, data: pd.DataFrame) -> Dict[str, Any]:
        """Return the visualisation payloads computed by fit()."""
        return self.visualizations