"""
scikit-learn 模板预处理器类。
"""
from sklearn.model_selection import train_test_split    #   用于将数据分割为训练集和测试集
from sklearn.preprocessing import StandardScaler        #   用于特征缩放
from sklearn.impute import SimpleImputer                #   用于处理缺失值
from sklearn.preprocessing import OneHotEncoder         #   用于分类特征编码
from sklearn.feature_selection import SelectKBest       #   用于特征选择


class PreprocessingGuide:
    """Template preprocessing helper built on scikit-learn.

    On construction, splits the provided data into train/test sets
    (stored as ``X_train``/``X_test``/``y_train``/``y_test``) and exposes
    one-shot preprocessing utilities that fit-and-transform in one call.
    """

    def __init__(self, X, y):
        # Baseline step used in virtually every project: hold out 20%
        # of the data for testing, with a fixed seed for reproducibility.
        (self.X_train, self.X_test,
         self.y_train, self.y_test) = train_test_split(
            X, y, test_size=0.2, random_state=42)

    def missing_data(self, X):
        """Fill missing values with the per-column mean and return the result."""
        return SimpleImputer(strategy='mean').fit_transform(X)

    def feature_scaling(self, X):
        """Standardize features to zero mean and unit variance."""
        return StandardScaler().fit_transform(X)

    def encode_categorical(self, X):
        """One-hot encode categorical features, returned as a dense array."""
        return OneHotEncoder(sparse_output=False).fit_transform(X)

    def feature_selection(self, X, y, k=10):
        """Keep the ``k`` highest-scoring features (default score function)."""
        return SelectKBest(k=k).fit_transform(X, y)
    

from sklearn.linear_model import LinearRegression   #  线性回归
from sklearn.linear_model import LogisticRegression    #  逻辑回归
from sklearn.ensemble import RandomForestClassifier  #  随机森林分类器
from sklearn.tree import DecisionTreeClassifier    #  决策树分类器
from sklearn.cluster import KMeans            #  K-Means聚类
from sklearn.decomposition import PCA       # 主成分分析 (PCA) 
from sklearn.datasets import make_regression  # 用于生成回归数据集
from sklearn.datasets import make_blobs      # 用于生成聚类数据集

class ModelGuide(PreprocessingGuide):
    """Template model class for scikit-learn.

    Inherits the train/test split from :class:`PreprocessingGuide` and, in
    addition, builds a small synthetic 1-feature regression dataset used by
    the linear-regression example.
    """
    def __init__(self, X, y):
        super().__init__(X, y)
        # Synthetic regression problem for the linear-regression demo.
        X_reg, y_reg = make_regression(n_samples=100, n_features=1, noise=10, random_state=42)
        # random_state=42 added so the split is reproducible, matching the
        # seeding convention used everywhere else in this file.
        (self.X_train_reg, self.X_test_reg,
         self.y_train_reg, self.y_test_reg) = train_test_split(
            X_reg, y_reg, test_size=0.3, random_state=42)

    def linear_regression(self):
        """Fit linear regression on the synthetic data; return (model, predictions)."""
        lr = LinearRegression()
        lr.fit(self.X_train_reg, self.y_train_reg)
        predictions = lr.predict(self.X_test_reg)
        return lr, predictions

    def logistic_regression(self):
        """Fit logistic regression (classification); return (model, predictions)."""
        lr = LogisticRegression()
        lr.fit(self.X_train, self.y_train)
        predictions = lr.predict(self.X_test)
        return lr, predictions

    def random_forest(self):
        """Fit a 100-tree random forest classifier; return (model, predictions)."""
        rf = RandomForestClassifier(n_estimators=100, random_state=42)
        rf.fit(self.X_train, self.y_train)
        predictions = rf.predict(self.X_test)
        return rf, predictions

    def decision_tree(self):
        """Fit a decision tree classifier; return (model, predictions)."""
        dt = DecisionTreeClassifier()
        dt.fit(self.X_train, self.y_train)
        predictions = dt.predict(self.X_test)
        return dt, predictions

    def k_means(self):
        """K-Means clustering demo on a synthetic blob dataset (unsupervised).

        Returns (fitted KMeans, cluster labels).
        """
        X_cluster, _ = make_blobs(n_samples=300, centers=4, random_state=42)
        kmeans = KMeans(n_clusters=4, random_state=42)
        cluster_labels = kmeans.fit_predict(X_cluster)
        return kmeans, cluster_labels

    def pca(self, n_components=2):
        """Reduce the training data to ``n_components`` dimensions via PCA.

        Returns (fitted PCA, reduced training data).
        """
        pca = PCA(n_components=n_components)
        X_reduced = pca.fit_transform(self.X_train)
        return pca, X_reduced


from sklearn.model_selection import train_test_split            #  用于将数据分割为训练集和测试集   
from sklearn.model_selection import cross_val_score        #  用于交叉验证 
from sklearn.model_selection import GridSearchCV           #  用于超参数调优 
from sklearn.model_selection import StratifiedKFold      #  用于分层K折交叉验证
from sklearn.model_selection import TimeSeriesSplit      #  用于时间序列数据的交叉验证   
from sklearn.metrics import accuracy_score, mean_squared_error  #  用于评估模型性能

class VerificationGuide(ModelGuide):
    """Template validation/evaluation class for scikit-learn.

    NOTE(review): this class deliberately overrides ``__init__`` with a
    (model, X_test, y_test) signature and does not call ``super().__init__``;
    the inherited training helpers are therefore unusable on instances built
    this way — confirm the inheritance is intentional.
    """
    def __init__(self, model, X_test, y_test):
        self.model = model          # fitted estimator to evaluate
        self.X_test = X_test        # held-out features
        self.y_test = y_test        # held-out targets

    def evaluate(self):
        """Evaluate the model on the held-out test set.

        Returns a dict with ``"accuracy"`` and ``"mean_squared_error"``.
        Fix: ``accuracy_score`` raises ``ValueError`` on continuous
        (regression) targets, which previously made this method crash for
        any regression model; accuracy is now reported as ``None`` in that
        case instead.
        """
        y_pred = self.model.predict(self.X_test)
        try:
            accuracy = accuracy_score(self.y_test, y_pred)
        except ValueError:
            # Continuous targets: accuracy is undefined for regression.
            accuracy = None
        mse = mean_squared_error(self.y_test, y_pred)
        return {"accuracy": accuracy, "mean_squared_error": mse}

    def cross_validation(self, X, y, cv=5):
        """Plain K-fold cross-validation (default 5 folds); returns per-fold scores."""
        return cross_val_score(self.model, X, y, cv=cv)

    def stratified_k_fold(self, X, y, n_splits=5):
        """Stratified K-fold CV — preserves class proportions in each fold.

        Suitable for classification problems; returns per-fold scores.
        """
        skf = StratifiedKFold(n_splits=n_splits)
        return cross_val_score(self.model, X, y, cv=skf)

    def hyperparameter_tuning(self, X, y, param_grid, cv=5):
        """Grid-search hyperparameter tuning.

        Returns (best_params_, best_score_) from the fitted ``GridSearchCV``.
        """
        grid_search = GridSearchCV(self.model, param_grid, cv=cv)
        grid_search.fit(X, y)
        return grid_search.best_params_, grid_search.best_score_

    def time_series_split(self, X, y, n_splits=5):
        """Cross-validation for time-series data — folds respect time order.

        Returns per-fold scores.
        """
        tss = TimeSeriesSplit(n_splits=n_splits)
        return cross_val_score(self.model, X, y, cv=tss)
    

