import os
import warnings

import matplotlib.font_manager as fm
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, roc_curve, auc
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.svm import SVC

warnings.filterwarnings('ignore')

import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties

# Configure a CJK-capable font so the Chinese labels in the plots render
# correctly. The original code unconditionally loaded a hard-coded Windows
# font path, which raises FileNotFoundError on any machine without that exact
# file (Linux/macOS, or Windows without SimHei) and aborts the whole script.
# Guard the path and fall back to the default font so `my_font` — used by
# every plotting method below — is always a valid FontProperties object.
font_path = "C:/Windows/Fonts/simhei.ttf"
if os.path.exists(font_path):
    my_font = FontProperties(fname=font_path)
    plt.rcParams['font.sans-serif'] = [my_font.get_name()]
else:
    my_font = FontProperties()
plt.rcParams['axes.unicode_minus'] = False

# Second fallback: auto-detect any installed Chinese font family.
zh_fonts = [f.name for f in fm.fontManager.ttflist if
            any(s in f.name for s in ['SimHei', 'SimSun', 'Microsoft YaHei', 'MSYH', 'FangSong', 'KaiTi'])]
if zh_fonts:
    plt.rcParams['font.sans-serif'] = [zh_fonts[0]]
    plt.rcParams['axes.unicode_minus'] = False
else:
    print('⚠️ 未找到可用的中文字体，请安装SimHei或Microsoft YaHei等字体。')


class TitanicAnalyzer:
    """End-to-end Titanic survival analysis.

    Pipeline: data loading / EDA -> visualization -> feature engineering ->
    preprocessing -> model training & evaluation -> feature importance ->
    detailed report. Intermediate state is kept on the instance so the steps
    can be run individually in order.
    """

    def __init__(self, file_path):
        """Store the CSV path and initialize empty state containers.

        Args:
            file_path: path to the Titanic CSV data file.
        """
        self.file_path = file_path
        self.df = None      # raw DataFrame, set by load_and_explore()
        self.X = None       # feature matrix, set by feature_engineering()
        self.y = None       # target vector ('Survived'), set by feature_engineering()
        self.results = {}   # model name -> dict of evaluation artifacts

    def load_and_explore(self):
        """Load the dataset and print exploratory summaries.

        Covers: raw preview, dtypes, missing-value table, target
        distribution, and descriptive statistics for numeric columns.
        """
        print("=" * 60)
        print("🔍 数据加载与探索性分析")
        print("=" * 60)

        # Data dictionary:
        # PassengerId: unique passenger identifier.
        # Survived: target variable — 0 = died, 1 = survived.
        # Pclass: ticket class / socio-economic status (1 = first, 2 = second, 3 = third).
        # Name: full name including title.
        # Sex: 'male' or 'female'.
        # Age: age in years; fractional when under one year old.
        # SibSp: number of siblings/spouses aboard.
        # Parch: number of parents/children aboard.
        # Ticket: ticket number.
        # Fare: ticket price in pounds.
        # Cabin: cabin number; frequently missing.
        # Embarked: port of embarkation — C = Cherbourg, Q = Queenstown, S = Southampton.

        # Show full frames without column/width truncation.
        pd.set_option('display.max_columns', None)
        pd.set_option('display.width', None)
        pd.set_option('display.max_colwidth', None)

        self.df = pd.read_csv(self.file_path)
        print(self.df)
        print(f"数据加载成功！形状: {self.df.shape}")

        # Basic information
        print("\n📋 数据基本信息:")
        print(f"数据类型:\n{self.df.dtypes}")

        # Missing-value analysis
        print("\n🔍 缺失值分析:")
        missing_data = self.df.isnull().sum()
        missing_percent = (missing_data / len(self.df)) * 100
        missing_df = pd.DataFrame({
            '缺失数量': missing_data,
            '缺失比例(%)': missing_percent
        })
        print(missing_df[missing_df['缺失数量'] > 0])

        # Target-variable distribution
        if 'Survived' in self.df.columns:
            survival_counts = self.df['Survived'].value_counts()
            survival_percent = self.df['Survived'].value_counts(normalize=True) * 100
            print(f"\n🎯 目标变量分布:")
            print(f"生存: {survival_counts[1]} 人 ({survival_percent[1]:.1f}%)")
            print(f"死亡: {survival_counts[0]} 人 ({survival_percent[0]:.1f}%)")

        # Descriptive statistics for numeric features
        numeric_cols = self.df.select_dtypes(include=[np.number]).columns
        if len(numeric_cols) > 0:
            print(f"\n📊 数值型特征描述性统计:")
            print(self.df[numeric_cols].describe())

    def visualize_data(self):
        """Produce the basic 2x3 panel of survival/distribution charts.

        Saves the figure to disk, then delegates to the advanced panel.
        """
        print("\n" + "=" * 60)
        print("📊 数据可视化分析")
        print("=" * 60)

        plt.style.use('seaborn-v0_8')

        # Optional English labels via the module-level USE_ENGLISH_LABELS flag.
        use_english = 'USE_ENGLISH_LABELS' in globals() and globals()['USE_ENGLISH_LABELS']

        fig, axes = plt.subplots(2, 3, figsize=(18, 12))

        # 1. Overall survival rate
        survival_counts = self.df['Survived'].value_counts()
        colors = ['#ff6b6b', '#4ecdc4']
        labels = ['Death', 'Survived'] if use_english else ['死亡', '生存']
        title = 'Overall Survival Rate' if use_english else '总体生存率分布'
        axes[0, 0].pie(survival_counts.values, labels=labels, autopct='%1.1f%%', colors=colors)
        axes[0, 0].set_title(title, fontproperties=my_font)

        # 2. Gender vs. survival rate
        gender_survival = pd.crosstab(self.df['Sex'], self.df['Survived'], normalize='index') * 100
        gender_survival.plot(kind='bar', ax=axes[0, 1], color=['#ff6b6b', '#4ecdc4'])
        axes[0, 1].set_title('Gender vs Survival Rate' if use_english else '性别与生存率关系', fontproperties=my_font)
        axes[0, 1].set_ylabel('Percentage (%)' if use_english else '比例 (%)', fontproperties=my_font)
        legend_labels = ['Death', 'Survived'] if use_english else ['死亡', '生存']
        axes[0, 1].legend(legend_labels, prop=my_font)

        # 3. Passenger class vs. survival rate
        class_survival = pd.crosstab(self.df['Pclass'], self.df['Survived'], normalize='index') * 100
        class_survival.plot(kind='bar', ax=axes[0, 2], color=['#ff6b6b', '#4ecdc4'])
        axes[0, 2].set_title('Passenger Class vs Survival Rate' if use_english else '乘客等级与生存率关系',
                             fontproperties=my_font)
        axes[0, 2].set_ylabel('Percentage (%)' if use_english else '比例 (%)', fontproperties=my_font)
        axes[0, 2].legend(legend_labels, prop=my_font)

        # 4. Age distribution
        axes[1, 0].hist(self.df['Age'].dropna(), bins=30, alpha=0.7, color='#74b9ff')
        axes[1, 0].set_title('Age Distribution' if use_english else '年龄分布', fontproperties=my_font)
        axes[1, 0].set_xlabel('Age' if use_english else '年龄', fontproperties=my_font)
        axes[1, 0].set_ylabel('Frequency' if use_english else '频数', fontproperties=my_font)

        # 5. Fare distribution
        axes[1, 1].hist(self.df['Fare'].dropna(), bins=30, alpha=0.7, color='#fd79a8')
        axes[1, 1].set_title('Fare Distribution' if use_english else '票价分布', fontproperties=my_font)
        axes[1, 1].set_xlabel('Fare' if use_english else '票价', fontproperties=my_font)
        axes[1, 1].set_ylabel('Frequency' if use_english else '频数', fontproperties=my_font)

        # 6. Embarkation port distribution
        embarked_counts = self.df['Embarked'].value_counts()
        axes[1, 2].pie(embarked_counts.values, labels=embarked_counts.index, autopct='%1.1f%%')
        axes[1, 2].set_title('Embarkation Port Distribution' if use_english else '登船港口分布', fontproperties=my_font)

        # Apply the CJK font to tick labels BEFORE saving — the original ran
        # this loop after savefig()/show(), so it never affected the output.
        for ax in axes.flat:
            ax.set_xticklabels(ax.get_xticklabels(), fontproperties=my_font)

        plt.tight_layout()
        filename = 'Basic_Visualization.png' if use_english else '基础可视化.png'
        plt.savefig(filename, dpi=300, bbox_inches='tight')
        plt.show()

        # Advanced visualizations
        self._advanced_visualizations()

    def _advanced_visualizations(self):
        """Produce the advanced 2x2 panel (age groups, family size, heatmaps)."""
        # Optional English labels via the module-level USE_ENGLISH_LABELS flag.
        use_english = 'USE_ENGLISH_LABELS' in globals() and globals()['USE_ENGLISH_LABELS']

        fig, axes = plt.subplots(2, 2, figsize=(16, 12))

        # 1. Age group vs. survival rate
        age_bins = [0, 12, 18, 30, 50, 100]
        age_labels = ['Child', 'Teenager', 'Young Adult', 'Adult', 'Senior'] if use_english else ['儿童', '青少年',
                                                                                                  '青年', '中年',
                                                                                                  '老年']
        self.df['AgeGroup'] = pd.cut(self.df['Age'], bins=age_bins, labels=age_labels)
        age_survival = pd.crosstab(self.df['AgeGroup'], self.df['Survived'], normalize='index') * 100
        age_survival.plot(kind='bar', ax=axes[0, 0], color=['#ff6b6b', '#4ecdc4'])
        axes[0, 0].set_title('Age Group vs Survival Rate' if use_english else '年龄组与生存率关系',
                             fontproperties=my_font)
        axes[0, 0].set_ylabel('Percentage (%)' if use_english else '比例 (%)', fontproperties=my_font)
        legend_labels = ['Death', 'Survived'] if use_english else ['死亡', '生存']
        axes[0, 0].legend(legend_labels, prop=my_font)
        # Explicit x labels so the CJK group names render with the right font.
        axes[0, 0].set_xticklabels(age_labels, fontproperties=my_font)

        # 2. Family size vs. survival rate
        self.df['FamilySize'] = self.df['SibSp'] + self.df['Parch'] + 1
        family_survival = pd.crosstab(self.df['FamilySize'], self.df['Survived'], normalize='index') * 100
        family_survival.plot(kind='bar', ax=axes[0, 1], color=['#ff6b6b', '#4ecdc4'])
        axes[0, 1].set_title('Family Size vs Survival Rate' if use_english else '家庭规模与生存率关系',
                             fontproperties=my_font)
        axes[0, 1].set_ylabel('Percentage (%)' if use_english else '比例 (%)', fontproperties=my_font)
        axes[0, 1].legend(legend_labels, prop=my_font)

        # 3. Gender + class survival heatmap
        gender_class_survival = pd.crosstab([self.df['Sex'], self.df['Pclass']],
                                            self.df['Survived'], normalize='index') * 100
        sns.heatmap(gender_class_survival, annot=True, fmt='.1f', cmap='RdYlBu_r', ax=axes[1, 0])
        axes[1, 0].set_title('Gender + Class Survival Heatmap' if use_english else '性别+等级生存率热力图',
                             fontproperties=my_font)

        # 4. Feature correlation heatmap
        numeric_df = self.df.select_dtypes(include=[np.number])
        correlation_matrix = numeric_df.corr()
        sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', center=0, ax=axes[1, 1])
        axes[1, 1].set_title('Feature Correlation Heatmap' if use_english else '特征相关性热力图',
                             fontproperties=my_font)

        # Apply the CJK font to tick labels BEFORE saving — the original ran
        # this loop after savefig()/show(), so it never affected the output.
        for ax in axes.flat:
            ax.set_xticklabels(ax.get_xticklabels(), fontproperties=my_font)

        plt.tight_layout()
        filename = 'Advanced_Visualization.png' if use_english else '高级可视化.png'
        plt.savefig(filename, dpi=300, bbox_inches='tight')
        plt.show()

    def feature_engineering(self):
        """Build the feature matrix from self.df.

        Fills missing values, extracts Title/FamilySize/IsAlone, bins Age and
        Fare, and sets self.X (13 selected features) and self.y ('Survived'
        when present). Returns the engineered DataFrame.
        """
        print("\n" + "=" * 60)
        print("🔧 特征工程")
        print("=" * 60)

        df_engineered = self.df.copy()

        # Missing-value handling. Use assignment instead of chained
        # `fillna(..., inplace=True)`, which warns on modern pandas and is
        # unsupported under copy-on-write (pandas 3.0).
        print("🔧 处理缺失值...")
        df_engineered['Age'] = df_engineered['Age'].fillna(df_engineered['Age'].median())
        df_engineered['Embarked'] = df_engineered['Embarked'].fillna(df_engineered['Embarked'].mode()[0])
        df_engineered['Fare'] = df_engineered['Fare'].fillna(df_engineered['Fare'].median())
        # Keep only the deck letter; missing cabins become 'U' (unknown).
        # The original re-checked notna() after fillna — redundant.
        df_engineered['Cabin'] = df_engineered['Cabin'].fillna('U').str[0]

        # Feature extraction
        print("🔧 特征提取...")
        # Extract the honorific title from the name, collapsing rare ones.
        df_engineered['Title'] = df_engineered['Name'].str.extract(r' ([A-Za-z]+)\.', expand=False)
        rare_titles = ['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona']
        df_engineered['Title'] = df_engineered['Title'].replace(rare_titles, 'Rare')
        df_engineered['Title'] = df_engineered['Title'].replace('Mlle', 'Miss')
        df_engineered['Title'] = df_engineered['Title'].replace('Ms', 'Miss')
        df_engineered['Title'] = df_engineered['Title'].replace('Mme', 'Mrs')

        # Family features
        df_engineered['FamilySize'] = df_engineered['SibSp'] + df_engineered['Parch'] + 1
        df_engineered['IsAlone'] = (df_engineered['FamilySize'] == 1).astype(int)

        # Age and fare binning
        age_bins = [0, 12, 18, 30, 50, 100]
        age_labels = ['Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']
        df_engineered['AgeGroup'] = pd.cut(df_engineered['Age'],
                                           bins=age_bins,
                                           labels=age_labels)
        df_engineered['FareGroup'] = pd.qcut(df_engineered['Fare'], 4, labels=['Low', 'Medium', 'High', 'Very High'])

        # Feature selection
        selected_features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked',
                             'Cabin', 'Title', 'FamilySize', 'IsAlone', 'AgeGroup', 'FareGroup']

        self.X = df_engineered[selected_features]
        self.y = df_engineered['Survived'] if 'Survived' in df_engineered.columns else None

        print(f"✅ 特征工程完成！特征数量: {len(selected_features)}")
        return df_engineered

    def preprocess_data(self):
        """Split train/test and fit the preprocessing pipeline.

        Numeric features: median impute + standard scale.
        Categorical features: mode impute + one-hot encode.
        Sets X_train/X_test (raw and processed) and y_train/y_test.
        """
        print("\n" + "=" * 60)
        print("⚙️ 数据预处理")
        print("=" * 60)

        # Separate numeric and categorical features
        numeric_features = self.X.select_dtypes(include=['int64', 'float64']).columns.tolist()
        categorical_features = self.X.select_dtypes(include=['object', 'category']).columns.tolist()

        # Preprocessing pipelines per feature type
        numeric_transformer = Pipeline(steps=[
            ('imputer', SimpleImputer(strategy='median')),
            ('scaler', StandardScaler())
        ])

        categorical_transformer = Pipeline(steps=[
            ('imputer', SimpleImputer(strategy='most_frequent')),
            ('onehot', OneHotEncoder(handle_unknown='ignore', sparse_output=False))
        ])

        self.preprocessor = ColumnTransformer(
            transformers=[
                ('num', numeric_transformer, numeric_features),
                ('cat', categorical_transformer, categorical_features)
            ])

        # Stratified train/test split keeps the class balance in both halves.
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.X, self.y, test_size=0.2, random_state=42, stratify=self.y
        )

        # Fit on the training split only; transform the test split.
        self.X_train_processed = self.preprocessor.fit_transform(self.X_train)
        self.X_test_processed = self.preprocessor.transform(self.X_test)

        print(f"✅ 数据预处理完成！")
        print(f"📊 训练集: {self.X_train_processed.shape}, 测试集: {self.X_test_processed.shape}")

        return self.X_train_processed, self.X_test_processed, self.y_train, self.y_test

    def train_and_evaluate(self):
        """Train four classifiers, evaluate each, and store self.results.

        For each model: 5-fold CV accuracy, test accuracy, confusion matrix,
        classification report, predictions and probabilities. Ends with a
        comparison table and ROC curves.
        """
        print("\n" + "=" * 60)
        print("🤖 模型训练与评估")
        print("=" * 60)

        # Candidate models (probability=True so SVC supports ROC curves).
        models = {
            'Logistic Regression': LogisticRegression(random_state=42, max_iter=1000),
            'Random Forest': RandomForestClassifier(random_state=42, n_estimators=100),
            'Gradient Boosting': GradientBoostingClassifier(random_state=42, n_estimators=100),
            'SVM': SVC(random_state=42, probability=True)
        }

        for name, model in models.items():
            print(f"\n🔧 训练 {name}...")

            # Cross-validation on the training split
            cv_scores = cross_val_score(model, self.X_train_processed, self.y_train, cv=5, scoring='accuracy')
            mean_cv_score = cv_scores.mean()
            std_cv_score = cv_scores.std()

            # Fit on the full training split
            model.fit(self.X_train_processed, self.y_train)

            # Predictions and positive-class probabilities
            y_pred = model.predict(self.X_test_processed)
            y_pred_proba = model.predict_proba(self.X_test_processed)[:, 1]

            # Evaluation metrics
            test_accuracy = accuracy_score(self.y_test, y_pred)
            cm = confusion_matrix(self.y_test, y_pred)
            report = classification_report(self.y_test, y_pred)

            # Store everything needed by the downstream reporting methods
            self.results[name] = {
                'model': model,
                'cv_score_mean': mean_cv_score,
                'cv_score_std': std_cv_score,
                'test_accuracy': test_accuracy,
                'confusion_matrix': cm,
                'classification_report': report,
                'y_pred': y_pred,
                'y_pred_proba': y_pred_proba
            }

            print(f"✅ {name} 训练完成")
            print(f"📊 交叉验证准确率: {mean_cv_score:.4f} (±{std_cv_score:.4f})")
            print(f"📊 测试集准确率: {test_accuracy:.4f}")

        # Model comparison table
        self._compare_models()

        # ROC curves for all models
        self._plot_roc_curves()

        return self.results

    def _compare_models(self):
        """Print a comparison table and announce the best model by test accuracy."""
        print("\n" + "-" * 40)
        print("📊 模型性能比较")
        print("-" * 40)

        comparison_data = []
        for name, result in self.results.items():
            comparison_data.append({
                'Model': name,
                'CV Score': f"{result['cv_score_mean']:.4f} (±{result['cv_score_std']:.4f})",
                'Test Accuracy': f"{result['test_accuracy']:.4f}"
            })

        comparison_df = pd.DataFrame(comparison_data)
        print(comparison_df.to_string(index=False))

        # Best model = highest test-set accuracy
        best_model = max(self.results.keys(), key=lambda k: self.results[k]['test_accuracy'])
        print(f"\n🏆 最佳模型: {best_model}")
        print(f"📊 最佳准确率: {self.results[best_model]['test_accuracy']:.4f}")

    def _plot_roc_curves(self):
        """Plot ROC curves (with AUC) for every trained model on one figure."""
        # Optional English labels via the module-level USE_ENGLISH_LABELS flag.
        use_english = 'USE_ENGLISH_LABELS' in globals() and globals()['USE_ENGLISH_LABELS']

        plt.figure(figsize=(10, 8))

        for name, result in self.results.items():
            fpr, tpr, _ = roc_curve(self.y_test, result['y_pred_proba'])
            roc_auc = auc(fpr, tpr)
            plt.plot(fpr, tpr, lw=2, label=f'{name} (AUC = {roc_auc:.3f})')

        # Diagonal = random-guess baseline
        plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate' if use_english else '假正率 (False Positive Rate)', fontproperties=my_font)
        plt.ylabel('True Positive Rate' if use_english else '真正率 (True Positive Rate)', fontproperties=my_font)
        plt.title('ROC Curves Comparison' if use_english else 'ROC曲线比较', fontproperties=my_font)
        plt.legend(loc="lower right", prop=my_font)
        plt.grid(True, alpha=0.3)
        filename = 'ROC_Curves.png' if use_english else 'ROC曲线.png'
        plt.savefig(filename, dpi=300, bbox_inches='tight')
        plt.show()

    def feature_importance_analysis(self):
        """Report and plot feature importances for the best model.

        Uses `feature_importances_` (tree models) or |coef_| (linear models);
        skips models exposing neither. Feature names are reconstructed from
        the fitted ColumnTransformer.
        """
        print("\n" + "=" * 60)
        print("🔍 特征重要性分析")
        print("=" * 60)

        # Rebuild the post-transform feature names: numeric columns pass
        # through unchanged; categorical columns expand via one-hot.
        feature_names = []
        numeric_features = self.X.select_dtypes(include=['int64', 'float64']).columns.tolist()
        feature_names.extend(numeric_features)

        categorical_features = self.X.select_dtypes(include=['object', 'category']).columns.tolist()
        encoder = self.preprocessor.transformers_[1][1].named_steps['onehot']
        if hasattr(encoder, 'get_feature_names_out'):
            feature_names.extend(encoder.get_feature_names_out(categorical_features))
        else:
            # Fallback for older scikit-learn without get_feature_names_out
            cats = encoder.categories_
            for feat, cat_list in zip(categorical_features, cats):
                feature_names.extend([f"{feat}_{cat}" for cat in cat_list])

        # Analyze the best model (by test accuracy)
        best_model_name = max(self.results.keys(), key=lambda k: self.results[k]['test_accuracy'])
        best_model = self.results[best_model_name]['model']

        if hasattr(best_model, 'feature_importances_'):
            importances = best_model.feature_importances_
        elif hasattr(best_model, 'coef_'):
            # Linear model: use absolute coefficient magnitude as importance
            importances = np.abs(best_model.coef_[0])
        else:
            print("该模型不支持特征重要性分析")
            return

        # Rank features by importance
        importance_df = pd.DataFrame({
            'Feature': feature_names,
            'Importance': importances
        }).sort_values('Importance', ascending=False)

        print(f"\n📊 {best_model_name} 特征重要性 (Top 15):")
        print(importance_df.head(15))

        # Plot the top 15 features
        use_english = 'USE_ENGLISH_LABELS' in globals() and globals()['USE_ENGLISH_LABELS']

        plt.figure(figsize=(12, 8))
        top_features = importance_df.head(15)
        plt.barh(range(len(top_features)), top_features['Importance'])
        plt.yticks(range(len(top_features)), top_features['Feature'], fontproperties=my_font)
        plt.xlabel('Importance' if use_english else '重要性', fontproperties=my_font)
        title = f'{best_model_name} - Feature Importance (Top 15)' if use_english else f'{best_model_name} - 特征重要性 (Top 15)'
        plt.title(title, fontproperties=my_font)
        plt.gca().invert_yaxis()
        plt.tight_layout()
        filename = 'Feature_Importance.png' if use_english else '特征重要性.png'
        plt.savefig(filename, dpi=300, bbox_inches='tight')
        plt.show()

    def detailed_analysis(self):
        """Print a detailed report for the best model: confusion matrix,
        classification report, and precision/recall/F1 computed by hand."""
        print("\n" + "=" * 60)
        print("📋 详细分析报告")
        print("=" * 60)

        best_model_name = max(self.results.keys(), key=lambda k: self.results[k]['test_accuracy'])
        result = self.results[best_model_name]

        print(f"\n🏆 最佳模型: {best_model_name}")
        print(f"📊 测试集准确率: {result['test_accuracy']:.4f}")
        print(f"📊 交叉验证准确率: {result['cv_score_mean']:.4f} (±{result['cv_score_std']:.4f})")

        # Confusion matrix (sklearn layout: rows = actual, cols = predicted)
        cm = result['confusion_matrix']
        print(f"\n📊 混淆矩阵:")
        print("          预测")
        print("实际  死亡  生存")
        print(f"死亡   {cm[0, 0]:3d}   {cm[0, 1]:3d}")
        print(f"生存   {cm[1, 0]:3d}   {cm[1, 1]:3d}")

        # Full classification report
        print(f"\n📋 分类报告:")
        print(result['classification_report'])

        # Manually-derived metrics for the positive (survived) class,
        # guarded against division by zero.
        tn, fp, fn, tp = cm.ravel()
        precision = tp / (tp + fp) if (tp + fp) > 0 else 0
        recall = tp / (tp + fn) if (tp + fn) > 0 else 0
        f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0

        print(f"\n📈 关键指标:")
        print(f"精确率 (Precision): {precision:.4f}")
        print(f"召回率 (Recall): {recall:.4f}")
        print(f"F1分数: {f1_score:.4f}")

    def run_complete_analysis(self):
        """Run the full pipeline end to end.

        The original left steps 2-7 commented out while still printing the
        list of generated files; the calls are restored so the method
        actually does what it claims.
        """
        print("🚀 开始泰坦尼克号生存预测数据分析")
        print("=" * 60)

        # 1. Load and explore the data
        self.load_and_explore()

        # 2. Visualize the data
        self.visualize_data()

        # 3. Feature engineering
        self.feature_engineering()

        # 4. Preprocessing
        self.preprocess_data()

        # 5. Model training and evaluation
        self.train_and_evaluate()

        # 6. Feature importance analysis
        self.feature_importance_analysis()

        # 7. Detailed report
        self.detailed_analysis()

        print("\n" + "=" * 60)
        print("🎉 分析完成！")
        print("=" * 60)
        print("📁 生成的文件:")
        print("   - 基础可视化.png")
        print("   - 高级可视化.png")
        print("   - ROC曲线.png")
        print("   - 特征重要性.png")

def main():
    """Entry point: run the complete Titanic analysis pipeline."""
    analyzer = TitanicAnalyzer('./data/titanic-data.csv')
    analyzer.run_complete_analysis()


if __name__ == "__main__":
    main()
