# -*- coding: utf-8 -*-
"""
Created on Mon Nov 29 22:44:56 2021

@author: hasee
"""
#调包
import numpy as np
import pandas as pd
#导入数据集
# Load the raw Titanic data set (local absolute path).
features = pd.read_csv('C:/Users/86152/Documents/WPSDrive/1122396675/WPS云盘/550A/机器学习实验/titanic_dataset.csv')

# Separate the target column from the feature matrix.
y_train1 = features['Survived']
X_train = features.drop('Survived', axis=1)

# Record the dimensions of the feature matrix and the label vector.
a, b = X_train.shape
a1 = y_train1.shape

# Inspect the feature set for missing values.
X_train.info()
X_train.isnull().sum()
y_train1.isnull().sum()
# Columns still holding NaN values (to be handled below):
#   Age, Cabin, Embarked, Fare
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams['font.family'] = ['SimHei']  # allow CJK glyphs in figure text

# Figure 1: compare the Age distribution of the raw data against three
# different NaN-imputation strategies, one subplot each.
fig1 = plt.figure()

ax = fig1.add_subplot(2, 2, 1)
plt.title('原数据集')
# Age distribution of the raw data (NaN rows excluded from the plot only).
sns.distplot(X_train['Age'].dropna(), hist=True, kde=True, axlabel=None)

# Strategy 1: fill Age NaNs with the overall median
# (np.nanmedian computes the median while ignoring NaN).
Xtrain1 = X_train.copy()
Xtrain1['Age'].replace(np.nan, np.nanmedian(Xtrain1['Age']), inplace=True)

# Strategy 2: fill Age NaNs with the median of the passenger's sex;
# fillna aligns the per-sex medians via the temporary Sex index.
age_sex_median = X_train.groupby('Sex').Age.median()
Xtrain2 = X_train.set_index('Sex')
Xtrain2.Age.fillna(age_sex_median, inplace=True)
Xtrain2.reset_index(inplace=True)
Xage2 = Xtrain2['Age']

# Strategy 3: fill Age NaNs with the median per (Pclass, Sex) group,
# applied in place to X_train itself.
age_Pclass = X_train.groupby(['Pclass', 'Sex']).Age.median()
X_train.set_index(['Pclass', 'Sex'], inplace=True)
X_train.Age.fillna(age_Pclass, inplace=True)
X_train.reset_index(inplace=True)

# Mean age after each strategy, for a quick sanity check.
meanage = [X_train.Age.mean(), Xtrain1.Age.mean(), Xtrain2.Age.mean()]

ax = fig1.add_subplot(2, 2, 2)
plt.title('同时考虑性别和仓位')
sns.distplot(X_train['Age'], hist=True, kde=True)

ax = fig1.add_subplot(2, 2, 3)
plt.title('性别中位数')
# BUGFIX: this subplot is titled "per-sex median", so it must show
# Xtrain2 (sex-median imputation); the original plotted Xtrain1 here.
sns.distplot(Xtrain2['Age'], hist=True, kde=True)

ax = fig1.add_subplot(2, 2, 4)
plt.title('中位数')
plt.xlabel('')
# BUGFIX: this subplot is titled "overall median", so it must show
# Xtrain1 (overall-median imputation); the original plotted Xtrain2 here.
sns.distplot(Xtrain1['Age'], hist=True, kde=True)

plt.subplots_adjust(wspace=0, hspace=0.5)
plt.show()

# Cabin is missing for most passengers; dropping the column loses
# little predictive information.
X_train.drop("Cabin", axis=1, inplace=True)

# Embarkation port codes:
#   S = Southampton (UK), C = Cherbourg-Octeville (France), Q = Queenstown (Ireland)
X_train.Embarked.value_counts()

# Plot how many passengers boarded at each port.
fig5 = plt.figure()
fig5 = sns.countplot(x='Embarked', data=X_train)
# S is by far the most common port, so impute missing Embarked with it.
X_train['Embarked'].replace(np.nan, 'S', inplace=True)

# Fare has a single missing value; locate it (passenger 1044).
X_train[np.isnan(X_train["Fare"])]
# Fares of comparable passengers: 3rd class, embarked at Southampton.
pclass3_fares = X_train.query('Pclass == 3 & Embarked == "S"')['Fare']
# BUGFIX: the original replaced NaN with 0 before calling np.median,
# which biases the median downward; np.nanmedian ignores NaN instead.
median_fare = np.nanmedian(pclass3_fares)
# Write the group median into the one missing Fare cell.
X_train.loc[X_train['PassengerId'] == 1044, 'Fare'] = median_fare
# Inspect the repaired row.
X_train.loc[X_train['PassengerId'] == 1044]

# Encode the categorical columns as integers for the estimators.
X_train['Sex'].replace(['male', 'female'], [1, 0], inplace=True)
X_train.isnull().sum()
X_train['Embarked'].replace(['S', 'C', 'Q'], [1, 2, 3], inplace=True)
# Name, Ticket and PassengerId are (near-)unique per row and duplicate
# the record identity, so none of them carries signal — drop all three.
data = X_train.drop(columns=['Name', 'Ticket', 'PassengerId'])

# Temporarily append the label (under the throw-away column name '2')
# so a single corrcoef call also yields feature-vs-label correlations.
data['2'] = y_train1
corr = np.corrcoef(data, rowvar=False)
data.drop(columns='2', inplace=True)
# Unsupervised dimensionality reduction: scale every feature to [0, 1],
# then rotate the data onto its principal components (all kept).
from sklearn import preprocessing
from sklearn.decomposition import PCA

mms = preprocessing.MinMaxScaler()
data = mms.fit_transform(data)

pca = PCA()
X_pca = pca.fit_transform(data)
# Fraction of total variance carried by each principal component.
ratio = pca.explained_variance_ratio_
##################################
# Scatter of the first two principal components, coloured by outcome.
fig2 = plt.figure()
y_train1 = np.array(y_train1)
# Boolean masks select the two classes directly from the PCA matrix.
died = y_train1 == 0
red_x, red_y = X_pca[died, 0], X_pca[died, 1]
blue_x, blue_y = X_pca[~died, 0], X_pca[~died, 1]
plt.scatter(red_x, red_y, c='r', label='death')
plt.scatter(blue_x, blue_y, c='blue', label='Survived')
########################################################

#原数据集进行数据挖掘
#1.KNN邻近算法 2.SVM支持向量机 3.Tree决策树
#调分割数据集的包
from sklearn.model_selection import train_test_split
#KNN的包
from sklearn.neighbors import KNeighborsClassifier
#把原数据集分割成训练集train和测试集test
#时间
import time


# Hand-picked subset: columns 0, 1 and 4 of the scaled feature matrix
# (presumably the features deemed most correlated with survival via
# `corr` above — TODO confirm against the post-drop column order).
data1=data[:,[0,1,4]]
def evolution(data, y_train1):
    """Benchmark KNN, linear SVM and a decision tree on an 80/20 split.

    Parameters
    ----------
    data : array-like of shape (n_samples, n_features) or (n_samples,)
        Feature matrix; a 1-D array is treated as a single feature.
    y_train1 : array-like of shape (n_samples,)
        Class labels.

    Returns
    -------
    (scores, times) : tuple of two lists
        Test-set accuracies and fit+score wall-clock times, both in the
        order [knn, svm, tree].
    """
    from sklearn import svm
    from sklearn import tree

    X_train, X_test, y_train, y_test = train_test_split(
        data, y_train1, test_size=0.2, random_state=50)
    # A single-feature input arrives 1-D, but sklearn estimators need a
    # 2-D matrix.  BUGFIX: the original reshaped only X_train, so
    # scoring on the still-1-D X_test would raise; reshape both halves.
    if X_train.size == y_train.size:
        X_train = X_train.reshape(len(X_train), 1)
        X_test = X_test.reshape(len(X_test), 1)

    def _fit_and_score(model):
        # Timing covers both fitting and scoring, as in the original.
        start = time.time()
        model.fit(X_train, y_train)
        acc = model.score(X_test, y_test)
        return acc, time.time() - start

    knn_score, knn_time = _fit_and_score(KNeighborsClassifier(n_neighbors=3))
    svm_score, svm_time = _fit_and_score(svm.SVC(kernel='linear'))
    tre_score, tree_time = _fit_and_score(tree.DecisionTreeClassifier())

    return [knn_score, svm_score, tre_score], [knn_time, svm_time, tree_time]
# Benchmark the three classifiers on: all features, the hand-picked
# column subset, and the first two principal components.
score_original,time_original=evolution(data,y_train1)
score1,time1=evolution(data1,y_train1)
pcascore,pcatime=evolution(X_pca[:,0:2],y_train1)

# Supervised dimensionality reduction with LDA.  With a binary target,
# LDA yields a single discriminant axis.
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

lda = LinearDiscriminantAnalysis()
# Fit on the scaled features, then project them onto the LDA axis.
X_lda = lda.fit(data, y_train1).transform(data)
# Variance explained along each discriminant axis.
ratio1 = lda.explained_variance_ratio_
################################
# Figure: the 1-D LDA projection per class.  Each value is plotted
# against itself (x == y), so points lie on the diagonal — a quick
# visual of how the two classes separate along the single LDA axis.
fig2 = plt.figure()
red_x = []
blue_x = []
y_train1 = np.array(y_train1)
# BUGFIX: iterate over X_lda (the array actually being indexed), not
# X_pca — the two only happen to share a length here, and the original
# would break if the PCA input ever changed.
for i in range(len(X_lda)):
    if y_train1[i] == 0:
        red_x.append(X_lda[i][0])
    else:
        blue_x.append(X_lda[i][0])
plt.scatter(red_x, red_x, c='r', label='death')
plt.scatter(blue_x, blue_x, alpha=0.5, c='blue', label='Survived')
###################################################################
ldascore, ldatime = evolution(X_lda, y_train1)

# Accuracy of each classifier across the four feature sets.
fig6 = plt.figure()
plt.title('泰坦尼克分类算法准确率对比')
plt.xticks([0, 1, 2], ['knn', 'svm', 'tree'])
for series, colour, tag in ((ldascore, 'r', 'lda'),
                            (pcascore, 'blue', 'pca'),
                            (score_original, 'green', 'normal'),
                            (score1, 'black', 'corr')):
    plt.plot(series, c=colour, label=tag)
plt.legend()
plt.show()

# Wall-clock time of each classifier across the same feature sets.
fig7 = plt.figure()
plt.title('泰坦尼克分类算法耗时对比')
plt.xticks([0, 1, 2], ['knn', 'svm', 'tree'])
for series, colour, tag in ((ldatime, 'r', 'lda'),
                            (pcatime, 'blue', 'pca'),
                            (time_original, 'green', 'normal'),
                            (time1, 'black', 'corr')):
    plt.plot(series, c=colour, label=tag)
plt.legend()
plt.show()
from sklearn import svm

# Final model: linear SVM on the full feature set.  Keep the signed
# distances of the test samples to the separating hyperplane.
X_train, X_test, y_train, y_test = train_test_split(
    data, y_train1, test_size=0.2, random_state=50)
clf = svm.SVC(kernel='linear').fit(X_train, y_train)
svm_score = clf.decision_function(X_test)
