# coding: utf-8

# Import numeric / ML packages
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedShuffleSplit, cross_val_score
from sklearn import preprocessing
from sklearn.metrics import classification_report
import joblib

# 定义变量
train_file = 'data/train.csv'
test_file = 'data/test.csv'
MODEL_PATH = './'
SUBMIT_PATH = './'
seed = 0


def clean_and_munge_data(df):
    """Feature-engineer a raw Titanic passenger DataFrame.

    Fills missing values, derives Title / Family_Size / AgeFill / fare-based
    features, integer-encodes the categorical columns, and drops raw columns
    that have been superseded.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw Titanic data with at least the columns PassengerId, Name, Sex,
        Age, SibSp, Parch, Fare, Pclass, Ticket, Cabin and Embarked.

    Returns
    -------
    pandas.DataFrame
        A new, fully numeric DataFrame; the input ``df`` is not mutated.
    """
    # BUG FIX: work on a copy — the original mutated the caller's DataFrame
    # (added columns and rewrote Fare in place).
    df = df.copy()

    # Treat a fare of 0 as missing; it is re-filled per class below.
    df.Fare = df.Fare.map(lambda x: np.nan if x == 0 else x)

    # Extract the honorific (Title) from the Name field.
    # NOTE: 'Mrs' must precede 'Mr' so "Mrs." names are not matched as 'Mr'.
    title_list = ['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev',
                  'Dr', 'Ms', 'Mlle', 'Col', 'Capt', 'Mme', 'Countess',
                  'Don', 'Jonkheer']

    def substrings_in_string(big_string, substrings):
        # Return the first substring found in big_string, NaN if none match.
        for substring in substrings:
            if substring in big_string:
                return substring
        return np.nan

    df['Title'] = df['Name'].map(lambda x: substrings_in_string(x, title_list))

    # Collapse rare titles into the four canonical ones: Mr/Mrs/Miss/Master.
    def replace_titles(x):
        title = x['Title']
        if title in ['Mr', 'Don', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col']:
            return 'Mr'
        elif title in ['Master']:
            return 'Master'
        elif title in ['Countess', 'Mme', 'Mrs']:
            return 'Mrs'
        elif title in ['Mlle', 'Ms', 'Miss']:
            return 'Miss'
        elif title == 'Dr':
            # BUG FIX: the dataset stores sex lowercase ('male'/'female') —
            # see the Gender mapping below — so comparing against 'Male'
            # always fell through and every Dr became 'Mrs'.
            if x['Sex'] == 'male':
                return 'Mr'
            else:
                return 'Mrs'
        elif pd.isna(title) or title == '':
            # Same lowercase fix for passengers with no recognised title.
            if x['Sex'] == 'male':
                return 'Mr'
            else:
                return 'Miss'
        else:
            return title

    df['Title'] = df.apply(replace_titles, axis=1)

    # Family size = siblings/spouses + parents/children aboard.
    df['Family_Size'] = df['SibSp'] + df['Parch']

    # Fill missing fares with the median fare of the passenger's class.
    df['Fare'] = df.groupby('Pclass')['Fare'].transform(lambda x: x.fillna(x.median()))

    # Numeric sex encoding (female=0, male=1).
    df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)

    # Fill missing ages with the median age of passengers sharing the title.
    df['AgeFill'] = df.groupby('Title')['Age'].transform(lambda x: x.fillna(x.median()))

    # Coarse age buckets.
    df['AgeCat'] = pd.cut(df['AgeFill'], bins=[-1, 10, 30, 60, np.inf], labels=['child', 'adult', 'senior', 'aged'])

    # Missing embarkation port defaults to Southampton (the most common).
    df['Embarked'] = df['Embarked'].fillna('S')

    # Reduce Cabin to a "had a cabin record" indicator.
    df['Cabin'] = df['Cabin'].notnull().astype(int)

    # Fare normalised by family size (+1 for the passenger themselves).
    df['Fare_Per_Person'] = df['Fare'] / (df['Family_Size'] + 1)

    # Interaction features.
    df['AgeClass'] = df['AgeFill'] * df['Pclass']
    df['ClassFare'] = df['Pclass'] * df['Fare_Per_Person']

    # Cheap/expensive ticket indicator (threshold: 8 per person).
    df['HighLow'] = pd.cut(df['Fare_Per_Person'], bins=[-1, 8, np.inf], labels=['Low', 'High'])

    # Integer-encode the categorical columns. np.unique(..., return_inverse=True)
    # assigns codes by sorted unique value, exactly like sklearn's LabelEncoder
    # fit/transform on the same column, without needing a separate estimator.
    # NOTE(review): codes are derived per DataFrame, so train and test runs can
    # map the same category to different integers — consider deriving the
    # mapping from the training set once and reusing it.
    for col in ['Sex', 'Ticket', 'Title', 'HighLow', 'AgeCat', 'Embarked']:
        codes = np.unique(np.asarray(df[col]), return_inverse=True)[1]
        df[col] = codes.astype(float)

    # Drop superseded raw columns.
    # BUG FIX: 'Cabin' is no longer dropped — it was just engineered into an
    # indicator above and is used as a model feature downstream; dropping it
    # made the feature selection crash with KeyError.
    df = df.drop(['PassengerId', 'Name', 'Age'], axis=1)

    return df


if __name__ == '__main__':
    # Load the raw Kaggle Titanic CSVs.
    raw_train = pd.read_csv(train_file)
    raw_test = pd.read_csv(test_file)

    # Run both sets through the shared feature-engineering pipeline.
    df_train = clean_and_munge_data(raw_train)
    df_test = clean_and_munge_data(raw_test)

    # Hand-picked feature columns (no patsy formula needed).
    features = ['Pclass', 'Title', 'Sex', 'AgeCat', 'Fare_Per_Person', 'Fare', 'Family_Size', 'Embarked','Cabin']
    X = df_train[features].values
    y = df_train['Survived'].values.ravel()

    # Hold out 20% of the labelled data for evaluation.
    X_tr, X_ho, y_tr, y_ho = train_test_split(X, y, test_size=0.2, random_state=seed)

    # Base classifier: random forest (grid search below overrides the
    # hyper-parameters listed in param_grid).
    clf = RandomForestClassifier(n_estimators=500, criterion='entropy', max_depth=5,
                                 min_samples_split=2, min_samples_leaf=1,
                                 max_features='sqrt', bootstrap=False, n_jobs=-1,
                                 random_state=seed, verbose=0)

    # Hyper-parameter search space.
    param_grid = {
        'n_estimators': [100, 200, 300, 400, 500],
        'max_depth': [3, 5, 7, 9, 11],
        'min_samples_split': [2, 4, 6, 8, 10],
        'min_samples_leaf': [1, 2, 3, 4, 5]
    }
    searcher = GridSearchCV(
        clf,
        param_grid=param_grid,
        scoring='accuracy',
        cv=StratifiedShuffleSplit(n_splits=10, test_size=0.2, random_state=seed),
        verbose=3,
    )
    searcher.fit(X_tr, y_tr)

    # Report the winning configuration.
    print("Best estimator:", searcher.best_estimator_)
    print("Best score:", searcher.best_score_)
    print("Best params:", searcher.best_params_)

    # Cross-validated accuracy on the two partitions.
    print('\nOn all train set:')
    cv_scores = cross_val_score(searcher.best_estimator_, X_tr, y_tr, cv=3, scoring='accuracy')
    print(cv_scores.mean(), cv_scores)

    print('\nOn test set:')
    cv_scores = cross_val_score(searcher.best_estimator_, X_ho, y_ho, cv=3, scoring='accuracy')
    print(cv_scores.mean(), cv_scores)

    # Per-class precision/recall/F1 breakdowns.
    print("\nClassification Report on Train Set:")
    print(classification_report(y_tr, searcher.best_estimator_.predict(X_tr)))

    print("\nClassification Report on Test Set:")
    print(classification_report(y_ho, searcher.best_estimator_.predict(X_ho)))

    # Persist the tuned model to disk.
    model_file = MODEL_PATH + 'model-rf.pkl'
    joblib.dump(searcher.best_estimator_, model_file)
