import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import classification_report, confusion_matrix, roc_curve, auc
from scipy import stats
import warnings
warnings.filterwarnings("ignore")

# Custom preprocessing class: fit on the training set only, then apply to validation/test
from sklearn.base import BaseEstimator, TransformerMixin

class DataPreprocessor(BaseEstimator, TransformerMixin):
    """Fit preprocessing statistics on the training set and apply them
    unchanged to validation/test sets (no leakage).

    Per column group:
      * numeric (float64/int64): median imputation -> standard scaling
      * categorical (object)   : most-frequent imputation -> per-column label encoding

    Categories that appear at transform time but were unseen during fit are
    mapped to the encoder's first known class instead of raising, so
    validation/test never crash on new labels.
    """

    def __init__(self):
        self.numeric_imputer = SimpleImputer(strategy='median')
        self.categorical_imputer = SimpleImputer(strategy='most_frequent')
        self.label_encoders = {}
        self.numeric_columns = []
        self.categorical_columns = []
        self.scaler = StandardScaler()

    def fit(self, X, y=None):
        """Learn imputation values, encodings and scaling from X.

        Parameters
        ----------
        X : pandas.DataFrame
            Training features.
        y : ignored
            Present only for sklearn API compatibility.

        Returns
        -------
        self
        """
        # Handle numeric and categorical columns separately.
        self.numeric_columns = X.select_dtypes(include=['float64', 'int64']).columns
        self.categorical_columns = X.select_dtypes(include=['object']).columns

        # Guard each group: SimpleImputer raises on a zero-column input,
        # so skip fitting when a dtype group is absent from X.
        if len(self.numeric_columns) > 0:
            self.numeric_imputer.fit(X[self.numeric_columns])
            # Fit the scaler on imputed values so fit and transform see
            # identically-prepared data.
            self.scaler.fit(self.numeric_imputer.transform(X[self.numeric_columns]))
        if len(self.categorical_columns) > 0:
            self.categorical_imputer.fit(X[self.categorical_columns])

        # Fit one LabelEncoder per categorical column.
        self.label_encoders = {}
        for col in self.categorical_columns:
            le = LabelEncoder()
            le.fit(X[col].astype(str))  # coerce to str so NaN/mixed types encode
            self.label_encoders[col] = le
        return self

    def transform(self, X):
        """Apply the fitted preprocessing to X and return a new DataFrame."""
        X_copy = X.copy()

        # Impute missing values with the statistics learned during fit.
        if len(self.numeric_columns) > 0:
            X_copy[self.numeric_columns] = self.numeric_imputer.transform(
                X_copy[self.numeric_columns])
        if len(self.categorical_columns) > 0:
            X_copy[self.categorical_columns] = self.categorical_imputer.transform(
                X_copy[self.categorical_columns])

        # Label-encode categoricals; LabelEncoder.transform raises on labels
        # not seen during fit, so map unseen values to the first known class.
        for col in self.categorical_columns:
            le = self.label_encoders[col]
            known = set(le.classes_)
            fallback = le.classes_[0]
            values = X_copy[col].astype(str).map(
                lambda v: v if v in known else fallback)
            X_copy[col] = le.transform(values)

        # Standardize numeric features with the training-set statistics.
        if len(self.numeric_columns) > 0:
            X_copy[self.numeric_columns] = self.scaler.transform(
                X_copy[self.numeric_columns])

        return X_copy

# 1. Data loading and exploration
data = pd.read_csv('your_dataset.csv')  # replace with your actual data file
print("\n数据基本信息：")
# DataFrame.info() prints directly and returns None; wrapping it in print()
# would emit a spurious "None" line.
data.info()
print("\n前5行数据：")
print(data.head())
print("\n缺失值统计：")
print(data.isnull().sum())
print("\n唯一值统计：")
print(data.nunique())

# Plot the distribution of every column: histogram (with KDE) for numeric
# columns, category counts otherwise.
sns.set(style="whitegrid")
for col in data.columns:
    plt.figure(figsize=(6, 4))
    # is_numeric_dtype also covers int32/float32/etc., not only 64-bit dtypes.
    if pd.api.types.is_numeric_dtype(data[col]):
        sns.histplot(data[col], kde=True)
    else:
        sns.countplot(x=col, data=data)
    plt.title(f'{col} 分布')
    plt.show()

# 2. Split: define target and features
X = data.drop('target', axis=1)  # replace 'target' with your target column name
y = data['target']

# Stratify on the label so class proportions are preserved in every split
# (70% train / 15% validation / 15% test); an unstratified split can skew
# class balance for classification.
X_train, X_temp, y_train, y_temp = train_test_split(
    X, y, test_size=0.3, random_state=42, stratify=y)
X_val, X_test, y_val, y_test = train_test_split(
    X_temp, y_temp, test_size=0.5, random_state=42, stratify=y_temp)
print(f"\n训练集：{X_train.shape}, 验证集：{X_val.shape}, 测试集：{X_test.shape}")

# 3. Preprocessing: fit on the training split only, then reuse the fitted
# statistics for validation and test so no information leaks across splits.
data_preprocessor = DataPreprocessor()
X_train = data_preprocessor.fit(X_train).transform(X_train)
X_val = data_preprocessor.transform(X_val)
X_test = data_preprocessor.transform(X_test)

# 4. Feature selection: keep the 10 features with the best ANOVA F-scores
selector = SelectKBest(score_func=f_classif, k=10)
X_train = selector.fit_transform(X_train, y_train)
X_val = selector.transform(X_val)
X_test = selector.transform(X_test)

# get_support(indices=True) yields the selected column indices; the default
# boolean mask did not match the "索引" (indices) label printed below.
selected_features = selector.get_support(indices=True)
print("\n选择的特征索引：", selected_features)

# 5. Feature-importance analysis on the selected features
rf = RandomForestClassifier(random_state=42)
rf.fit(X_train, y_train)
importances = rf.feature_importances_
# Use the original column names (restricted to the selected ones) so the
# table and plot are readable instead of showing bare integer positions.
selected_names = X.columns[selector.get_support()]
importance_df = pd.DataFrame({'Feature': selected_names, 'Importance': importances})
importance_df = importance_df.sort_values(by='Importance', ascending=False)
print("\n特征重要性：")
print(importance_df)

plt.figure(figsize=(10, 6))
sns.barplot(x='Importance', y='Feature', data=importance_df)
plt.title("Feature Importance")
plt.show()

# 6. Baseline comparison: train several classifiers and report validation
# metrics for each.
models = {
    "Logistic Regression": LogisticRegression(),
    "Decision Tree": DecisionTreeClassifier(),
    "Random Forest": RandomForestClassifier(),
    "SVM": SVC(probability=True)
}

results = {}
for name in models:
    clf = models[name]
    clf.fit(X_train, y_train)
    print(f"\n模型：{name}")
    print(classification_report(y_val, clf.predict(X_val)))
    results[name] = clf

# 7. Hyper-parameter tuning (random forest example): exhaustive search over
# a small grid with 3-fold cross-validation on the training split.
param_grid = {
    'n_estimators': [50, 100, 200],
    'max_depth': [None, 10, 20],
    'min_samples_split': [2, 5],
    'min_samples_leaf': [1, 2]
}
grid_search = GridSearchCV(
    estimator=RandomForestClassifier(random_state=42),
    param_grid=param_grid,
    cv=3,
    scoring='accuracy',
    verbose=2,
)
grid_search.fit(X_train, y_train)
print("\n最佳参数：", grid_search.best_params_)
best_model = grid_search.best_estimator_

# 8. Final evaluation on the held-out test split
test_pred = best_model.predict(X_test)
print("\n测试集结果：")
print(classification_report(y_test, test_pred))

# Confusion-matrix heat map (rows = true labels, columns = predictions).
conf_mat = confusion_matrix(y_test, test_pred)
ax = sns.heatmap(conf_mat, annot=True, fmt='d', cmap='Blues')
ax.set_title("Confusion Matrix")
ax.set_ylabel("True Values")
ax.set_xlabel("Predicted Values")
plt.show()

# ROC curve. NOTE(review): this assumes binary classification — column 1 of
# predict_proba is the positive class. Passing pos_label explicitly makes
# roc_curve work for arbitrary binary label values (e.g. strings), where the
# default would raise unless labels are {0,1} or {-1,1}.
positive_class = best_model.classes_[1]
positive_scores = best_model.predict_proba(X_test)[:, 1]
fpr, tpr, _ = roc_curve(y_test, positive_scores, pos_label=positive_class)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label=f"AUC = {roc_auc:.2f}")
plt.plot([0, 1], [0, 1], 'k--')
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend()
plt.show()
