

# Load the Titanic dataset and run an initial cleaning pass.
import numpy as np
import pandas as pd

# Read the raw CSV into a DataFrame.
features = pd.read_csv('E:/PROJECT_Dynasty2023/project_-dynasty2023/Project_MachineLearning/titanic_dataset.csv')
# 'Cabin' is mostly missing, so drop the whole column first, then drop
# every remaining row that still contains any missing value.
features = features.drop(['Cabin'], axis=1).dropna(axis=0, how='any')
print(features)

# Separate the label column from the feature matrix.
y_train1 = features['Survived']
X_train = features.drop('Survived', axis=1)
#############################################
# Record the feature-matrix dimensions and the label-vector shape.
a, b = X_train.shape
a1 = y_train1.shape
# Inspect dtypes and non-null counts of the feature set.
X_train.info()
# Tally any remaining missing values (exploratory; results not stored).
X_train.isnull().sum()
y_train1.isnull().sum()
# Columns that originally contained missing values:
#X_train.Age
# X_train.Cabin
#X_train.Embarked
#X_train.Fare
# Plotting libraries (seaborn is imported but not used in this chunk).
import seaborn as sns
#############################
import matplotlib.pyplot as plt
# Use the SimHei font so Chinese characters render correctly in figures.
plt.rcParams['font.family'] = ['SimHei']

# --- Missing-value imputation and categorical encoding ---------------------
# NOTE(review): the earlier dropna() already removed every row containing a
# missing value, so these imputations are defensive no-ops on this frame.

# Fill missing embarkation ports with 'S' (the most common port).
# Assigning back to the column replaces the original chained-assignment
# `inplace=True` replace, which is deprecated in modern pandas and can
# silently fail to update the DataFrame under copy-on-write.
X_train['Embarked'] = X_train['Embarked'].fillna('S')
# Cabin had too many missing values; it was already dropped earlier.
# X_train.drop("Cabin", axis=1, inplace=True)
# Embarkation port codes:
# S: Southampton, UK
# C: Cherbourg-Octeville, France
# Q: Queenstown, Ireland
# Fares of 3rd-class passengers who boarded at Southampton.
pclass3_fares = X_train.query('Pclass == 3 & Embarked == "S"')['Fare']
# Treat missing fares as 0 before taking the median.
pclass3_fares = pclass3_fares.fillna(0)
# Median fare of that group.
median_fare = np.median(pclass3_fares)
# Patch the one passenger (id 1044) whose fare is missing.
X_train.loc[X_train['PassengerId'] == 1044, 'Fare'] = median_fare
# Encode sex numerically: male -> 1, female -> 0.
X_train['Sex'] = X_train['Sex'].replace(['male', 'female'], [1, 0])
X_train.isnull().sum()
# Encode embarkation port numerically: S -> 1, C -> 2, Q -> 3.
X_train['Embarked'] = X_train['Embarked'].replace(['S', 'C', 'Q'], [1, 2, 3])
# 'Name' duplicates the row identity, and 'Ticket'/'PassengerId' carry no
# predictive signal, so drop all three identifier-like columns in one call.
data = X_train.drop(['Name', 'Ticket', 'PassengerId'], axis=1)
data.info()
# --- Dimensionality reduction with PCA -------------------------------------
from sklearn import preprocessing
from sklearn.decomposition import PCA

# Scale every feature into [0, 1] so no column dominates the projection.
scaler = preprocessing.MinMaxScaler()
data = scaler.fit_transform(data)
# Project the scaled features onto the top 3 principal components.
pca = PCA(n_components=3)
X_pca = pca.fit_transform(data)
# Explained-variance ratio of each retained component.
ratio = pca.explained_variance_ratio_
# Labels as a plain NumPy array for the estimators below.
y_train1 = np.array(y_train1)

# --- Data mining on the reduced dataset ------------------------------------
# Models compared: 1. KNN  2. SVM  3. Logistic regression
# Train/test split helper.
from sklearn.model_selection import train_test_split
# K-nearest-neighbours classifier.
from sklearn.neighbors import KNeighborsClassifier
# Logistic regression classifier.
from sklearn.linear_model import LogisticRegression

# Timing utilities (available for benchmarking the fits below).
import time
# BUG FIX: the original computed `cm = confusion_matrix(y_test, y_pred_svm)`
# here, but `confusion_matrix` was never imported and `y_test`/`y_pred_svm`
# are only defined further down — the line raised NameError and aborted the
# script. The confusion matrix is computed after the predictions exist.

# --- Train, score, and predict with the three classifiers ------------------
y_train1 = np.array(y_train1)
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix

# Hold out 20% of the PCA-projected data for testing.
# NOTE: this rebinds X_train (previously the feature DataFrame) to the
# PCA-transformed training split, matching the original script's behavior.
X_train, X_test, y_train, y_test = train_test_split(
    X_pca, y_train1, test_size=0.2, random_state=50)

# Use distinct instance names so the `svm` module and the estimator classes
# are not shadowed by their own instances (the original rebound `svm`,
# `LogisticRegression`, and `KNeighborsClassifier`, making them unusable
# afterwards).
svm_clf = svm.SVC(kernel='linear')
svm_clf.fit(X_train[:, 0:3], y_train)
lr_clf = LogisticRegression(max_iter=1000)
lr_clf.fit(X_train[:, 0:3], y_train)
knn_clf = KNeighborsClassifier(n_neighbors=1)
knn_clf.fit(X_train[:, 0:3], y_train)

# Mean accuracy of each model on the held-out test split.
knn_score = knn_clf.score(X_test[:, 0:3], y_test)
y_score_svm = svm_clf.score(X_test[:, 0:3], y_test)
y_score_lr = lr_clf.score(X_test[:, 0:3], y_test)

# Per-sample predictions on the test split.
y_pred_knn = knn_clf.predict(X_test[:, 0:3])
y_pred_svm = svm_clf.predict(X_test[:, 0:3])
y_pred_lr = lr_clf.predict(X_test[:, 0:3])
# Confusion matrix for the SVM — this is where the computation the original
# attempted prematurely (before the predictions existed) belongs.
cm = confusion_matrix(y_test, y_pred_svm)

print(y_score_lr)
print(y_score_svm)
print(knn_score)

