from pathlib import Path

import joblib
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

def create_ml_pipeline():
    """Build the complete ML pipeline: standardize, reduce, classify.

    Returns:
        Pipeline: scaler -> PCA (retaining 95% of the variance) ->
        random-forest classifier seeded for reproducibility.
    """
    steps = [
        ('scaler', StandardScaler()),
        # Float n_components keeps just enough components for 95% variance.
        ('pca', PCA(n_components=0.95)),
        ('classifier', RandomForestClassifier(random_state=42)),
    ]
    return Pipeline(steps)

def train_pipeline_with_gridsearch(X_train, y_train):
    """Tune and fit the pipeline with 5-fold cross-validated grid search.

    Args:
        X_train: training feature matrix.
        y_train: training labels.

    Returns:
        The best fitted Pipeline found by the search
        (``GridSearchCV.best_estimator_``).
    """
    # Double-underscore keys ('pca__', 'classifier__') address the
    # hyperparameters of individual pipeline steps by step name.
    search_space = {
        'pca__n_components': [0.8, 0.9, 0.95],
        'classifier__n_estimators': [50, 100, 200],
        'classifier__max_depth': [None, 10, 20],
    }

    search = GridSearchCV(
        create_ml_pipeline(),
        search_space,
        cv=5,
        scoring='accuracy',
        n_jobs=-1,  # parallelize across all available cores
    )
    search.fit(X_train, y_train)

    print("最佳参数:", search.best_params_)
    print("最佳交叉验证分数: {:.4f}".format(search.best_score_))

    return search.best_estimator_

def save_entire_pipeline(pipeline, filepath='models/full_pipeline.pkl'):
    """Persist an entire (fitted) Pipeline to disk with joblib.

    Args:
        pipeline: fitted sklearn Pipeline (or any estimator) to serialize.
        filepath: destination path; parent directories are created if missing.
    """
    # joblib.dump raises FileNotFoundError when the target directory
    # (e.g. the default 'models/') does not exist — create it first.
    Path(filepath).parent.mkdir(parents=True, exist_ok=True)
    joblib.dump(pipeline, filepath)
    print(f"完整Pipeline已保存到: {filepath}")

def load_and_use_pipeline(filepath='models/full_pipeline.pkl'):
    """Load a previously saved Pipeline from disk.

    Args:
        filepath: path to the joblib-serialized pipeline file.

    Returns:
        The deserialized Pipeline, ready for predict/transform calls.
    """
    return joblib.load(filepath)
