"""
    资料清洗
        1、移除重复值，无用的资料，或是修正missing data
        2、在不降低效果的情况下，减少资料维度
        3、试着填补一些遗漏值

"""
from __future__ import print_function, division
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.mllib.regression import LabeledPoint

import time
import os
import csv
from numpy import array

spark = SparkSession.builder.master("local[2]").appName("test").enableHiveSupport().getOrCreate()
sc = spark.sparkContext
# Read the raw Titanic train/test CSVs. header=True keeps the column names,
# but without inferSchema every column is loaded as string — hence the
# explicit casts to double further below.
df_train = spark.read.csv("E:\\Python\\pyspark_demo01\\pyspark_data\\titanic_train.csv", header=True)
df_test = spark.read.csv("E:\\Python\\pyspark_demo01\\pyspark_data\\titanic_test.csv", header=True)

# 对读取到的数据进行处理
# Tag each frame's origin so rows can be told apart after the union, and give
# the test set a placeholder Survived column so the two schemas match.
from pyspark.sql.functions import lit, col

# Mark the training rows.
df_train = df_train.withColumn("Mark", lit("train"))
# The test set has no Survived label: fill with 0 and mark as test.
df_test = df_test.withColumn("Survived", lit(0)).withColumn("Mark", lit("test"))

# Reorder df_test's columns to match df_train — union() resolves columns by
# position, not by name — then stack the two frames.
# Fix: unionAll() is deprecated since Spark 2.0; union() is the supported
# equivalent (and this file already uses the Spark 2.x SparkSession API).
df_test = df_test[df_train.columns]
df = df_train.union(df_test)

# 对需要处理的字段名进行类型转换
df = (df.withColumn("Age", df["Age"].cast("double"))
      .withColumn("SibSp", df["SibSp"].cast("double"))
      .withColumn("Parch", df["Parch"].cast("double"))
      .withColumn("Fare", df["Fare"].cast("double"))
      .withColumn("Survived", df["Survived"].cast("double")))

# 对新生成的表进行通过mark进行分组
df.groupBy("Mark").count().show()

# 去除空值
numVars = ['Survived', 'Age', 'SibSp', 'Parch', 'Fare']
missing = dict()
# 寻找df中的空值
for numVar in numVars:
    missing[numVar] = df.where(df[numVar].isNull()).count()
print(missing)

# 对遗漏值进行处理
age_mean = df.groupBy().mean("Age").first()[0]
fare_mean = df.groupBy().mean("Fare").first()[0]
# 将遗漏的年龄用年龄的平均值替换，Parch用平均值替换， 性别默认为male，parch为0，Embarked的值为S
df = df.na.fill({'Age': age_mean, 'Fare': fare_mean, 'Parch': 0, 'Sex': 'male', 'Embarked': 'S'})
"""
na:表示df中的遗漏值

"""

from pyspark.sql.functions import udf
from pyspark.sql.types import StringType

# Extract the honorific title ("Mr", "Mrs", "Miss", ...) from the Name column.
# Titanic names look like "Braund, Mr. Owen Harris": the title sits between
# the comma and the first period.
# Fix: the original `name.split('.')[0]` kept everything before the first
# period, i.e. surname + title ("Braund, Mr"), yielding one distinct "title"
# per surname instead of a small categorical set.
getTitle = udf(lambda name: name.split(',')[1].split('.')[0].strip(), StringType())
df = df.withColumn('Title', getTitle(df["Name"]))

# Categorical string columns must be encoded as numbers before ML algorithms
# can consume them.
from pyspark.ml.feature import StringIndexer

# Encode Sex as a numeric index, dropping the string column and reusing its name.
# NOTE(review): this df_indexed is overwritten by the pipeline result below,
# and the pipeline is fitted on `df` (which still holds the string Sex), so
# this single-column pass looks like dead work — confirm before removing.
si = StringIndexer(inputCol='Sex', outputCol='Sex_indexed')
df_indexed = si.fit(df).transform(df).drop('Sex').withColumnRenamed("Sex_indexed", 'Sex')

# Categorical columns to index in bulk below.
catVars = ['Pclass', 'Sex', 'Embarked', 'Title']


def indexer(df, col):
    """Fit and return a StringIndexer model that maps the string column
    ``col`` to numeric codes in a new ``col + '_indexed'`` column."""
    return StringIndexer(inputCol=col, outputCol=col + '_indexed').fit(df)


from pyspark.ml import Pipeline

# Fit one StringIndexer per categorical column, then chain the fitted models
# in a single Pipeline so every *_indexed column is produced in one transform.
indexers = [indexer(df, catVar) for catVar in catVars]

pipeline = Pipeline(stages=indexers)
df_indexed = pipeline.fit(df).transform(df)
# Spot-check one encoding: original value next to its numeric index.
df_indexed.select("Embarked", "Embarked_indexed").show(3)

