import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression,LogisticRegression
from sklearn.model_selection import KFold
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier
import warnings
# Silence library deprecation/convergence warnings so the printed accuracy
# output of each model stays readable.
warnings.filterwarnings('ignore')

def data_preprocess(path='train.csv'):
    """Load the Titanic training data and prepare it for modelling.

    Parameters
    ----------
    path : str, optional
        CSV file to load. Defaults to ``'train.csv'`` so existing callers
        are unaffected.

    Returns
    -------
    tuple
        ``(data, predicators)`` where ``data`` is the loaded DataFrame with
        missing ages filled and ``predicators`` is the list of numeric
        feature columns used for prediction.
    """
    # Widen pandas display limits so wide frames print without truncation.
    pd.set_option('display.max_columns', 1000)
    pd.set_option('display.width', 1000)
    pd.set_option('display.max_colwidth', 1000)
    data = pd.read_csv(path)

    # Fill missing ages with the column median (robust to outliers).
    data['Age'] = data['Age'].fillna(data['Age'].median())
    # Numeric columns used as predictive features.
    predicators = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']
    return data, predicators

def Liner(data_train , predicators):
    """Score a linear-regression classifier via 3-fold cross-validation.

    Fits ``LinearRegression`` on each training fold, predicts the held-out
    fold, thresholds the continuous output at 0.5 to obtain 0/1 labels, and
    prints/returns the overall accuracy against ``data_train['Survived']``.

    Parameters
    ----------
    data_train : pandas.DataFrame
        Training data; must contain the ``predicators`` columns and a
        ``Survived`` target column.
    predicators : list of str
        Feature column names.

    Returns
    -------
    float
        Accuracy over all folds (also printed).
    """
    # Initialize the regression estimator.
    alg = LinearRegression()
    # BUG FIX: the original ``KFold(3, False, 1)`` passed random_state=1
    # together with shuffle=False, which raises ValueError in
    # scikit-learn >= 0.24 (random_state is only used when shuffle=True),
    # and positional arguments beyond n_splits are no longer accepted.
    kf = KFold(n_splits=3)
    predictions = []
    for train_idx, test_idx in kf.split(data_train):
        # Extract features and target for the training part of this fold.
        train_preditors = data_train[predicators].iloc[train_idx, :]
        train_target = data_train['Survived'].iloc[train_idx]
        # Fit on the training fold.
        alg.fit(train_preditors, train_target)
        # Predict the held-out fold.
        fold_predictions = alg.predict(data_train[predicators].iloc[test_idx, :])
        predictions.append(fold_predictions)
    # With shuffle=False the concatenated folds cover rows 0..n-1 in order,
    # so positions line up with data_train['Survived'].
    predictions = np.concatenate(predictions, axis=0)
    # Threshold the regression output at 0.5 to get binary class labels.
    predictions[predictions > 0.5] = 1
    predictions[predictions <= 0.5] = 0
    accuracy = sum(predictions == data_train["Survived"]) / len(predictions)
    print("线性回归准确率为: ")
    print(accuracy)
    return accuracy

def logic(data_train,predicators):
    """Score a logistic-regression classifier via 3-fold cross-validation.

    Parameters
    ----------
    data_train : pandas.DataFrame
        Training data containing the feature columns and ``Survived``.
    predicators : list of str
        Feature column names.

    Returns
    -------
    float
        Mean cross-validation accuracy (also printed).
    """
    LogRegAlg = LogisticRegression(random_state=1)
    # NOTE: the previous full-data ``fit`` call here was removed —
    # cross_val_score clones the estimator and fits it on each fold itself,
    # so pre-fitting was redundant work whose result was never used.
    scores = model_selection.cross_val_score(LogRegAlg,data_train[predicators],data_train['Survived'],cv=3)
    # Use the mean of the per-fold scores as the final accuracy.
    print("逻辑回归准确率为: ", scores.mean())
    return scores.mean()

def newLogic(data_train):
    """Re-run logistic regression with Sex and Embarked added as features.

    Encodes the categorical columns in place on ``data_train`` and then
    delegates scoring to :func:`logic`.
    """
    predicators = ['Pclass','Sex', 'Age', 'SibSp', 'Parch', 'Fare','Embarked']
    # Integer encodings for the two categorical columns:
    # Sex: male -> 0, female -> 1; Embarked: S -> 0, C -> 1, Q -> 2.
    encodings = (
        ("Sex", (("male", 0), ("female", 1))),
        ("Embarked", (("S", 0), ("C", 1), ("Q", 2))),
    )
    column, mapping = encodings[0]
    for label, code in mapping:
        data_train.loc[data_train[column] == label, column] = code
    # Missing embarkation ports default to 'S', the most common value.
    data_train["Embarked"] = data_train["Embarked"].fillna('S')
    column, mapping = encodings[1]
    for label, code in mapping:
        data_train.loc[data_train[column] == label, column] = code
    print("增加条件后:")
    logic(data_train,predicators=predicators)

def randomForest(data_train):
    """Score a random-forest classifier via 5-fold cross-validation.

    Encodes the Sex and Embarked columns in place on ``data_train``, then
    cross-validates a 10-tree ``RandomForestClassifier`` on the extended
    feature set and prints/returns the mean accuracy.

    Parameters
    ----------
    data_train : pandas.DataFrame
        Training data containing the feature columns and ``Survived``.

    Returns
    -------
    float
        Mean cross-validation accuracy (also printed).
    """
    predictors = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
    # Encode Sex in place: male -> 0, female -> 1.
    data_train.loc[data_train["Sex"] == "male", "Sex"] = 0
    data_train.loc[data_train["Sex"] == "female", "Sex"] = 1
    # Fill missing embarkation ports with 'S', the most frequent value.
    data_train["Embarked"] = data_train["Embarked"].fillna('S')
    # Encode Embarked in place: S -> 0, C -> 1, Q -> 2.
    data_train.loc[data_train["Embarked"] == "S", "Embarked"] = 0
    data_train.loc[data_train["Embarked"] == "C", "Embarked"] = 1
    data_train.loc[data_train["Embarked"] == "Q", "Embarked"] = 2
    # 10 trees; stop splitting at 2 samples per node / 1 sample per leaf.
    alg = RandomForestClassifier(random_state=1, n_estimators=10, min_samples_split=2, min_samples_leaf=1)
    # BUG FIX: the original passed random_state=1 with shuffle=False, which
    # raises ValueError in scikit-learn >= 0.24 (random_state is only used
    # when shuffle=True). With no shuffling the split is deterministic anyway.
    kf = model_selection.KFold(n_splits=5, shuffle=False)
    # Cross-validate: estimator, features, target, CV splitter.
    scores = model_selection.cross_val_score(alg, data_train[predictors], data_train["Survived"], cv=kf)
    print('随机森林准确率：')
    # Take the mean of the scores (because we have one for each fold).
    print(scores.mean())
    return scores.mean()

if __name__ == '__main__':
    # Load the data once and evaluate the two baseline models.
    train_df, feature_cols = data_preprocess()
    Liner(train_df, feature_cols)
    logic(train_df, feature_cols)
    # newLogic mutates train_df in place (encodes Sex/Embarked).
    newLogic(train_df)
    # Reload a clean copy before the random-forest run.
    train_df, feature_cols = data_preprocess()
    randomForest(train_df)