import org.apache.spark.SparkConf
import org.apache.spark.SparkContext

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.{MultivariateStatisticalSummary, Statistics}

import org.apache.spark.sql.SparkSession
import org.apache.spark.mllib.feature.StandardScaler
import org.apache.spark.mllib.feature.PCA


import org.apache.spark.sql.functions.col

object Air {

  /**
   * Write `df` as a single headered CSV file under `path`, overwriting any
   * previous output and suppressing the `_SUCCESS` marker file.
   * Extracted because the original repeated this 5-line pipeline four times.
   */
  private def saveCsv(df: org.apache.spark.sql.DataFrame, path: String): Unit = {
    df.coalesce(1) // one partition -> one output file
      .write.mode("overwrite")
      .option("mapreduce.fileoutputcommitter.marksuccessfuljobs", "false") // drop _SUCCESS file
      .option("header", "true")
      .csv(path)
  }

  /**
   * Cleans the airline customer dataset (air_data.csv):
   *   1. reports per-column missing counts and missing rates,
   *   2. drops rows with null SUM_YR_1 / SUM_YR_2,
   *   3. keeps rows with a non-zero yearly fare, or zero kilometres AND zero
   *      average discount (the standard cleaning rule for this dataset),
   *   4. derives the five LRFMC attributes and saves each stage as CSV.
   */
  def main(args: Array[String]): Unit = {
    // One SparkSession is enough; its SparkContext replaces the separately
    // constructed SparkContext the original code created redundantly.
    val ss = SparkSession.builder()
      .master("local")
      .appName("t2")
      .getOrCreate()
    val sc = ss.sparkContext
    sc.setLogLevel("WARN")
    import ss.implicits._

    // Path to air_data.csv. NOTE(review): source file is not UTF-8 encoded,
    // but the columns required below are unaffected.
    val path = "data/air_data.csv"
    val lines = ss.read.option("header", "true").csv(path)
    lines.show()

    // Missing-value statistics: one counting job per column.
    val columns = lines.columns
    val cnt = lines.count()
    val missingCnt = columns.map(c => lines.where(col(c).isNull).count())

    // Missing rate with 4 decimal places. Reuses missingCnt instead of
    // re-scanning every column a second time (the original ran each
    // null-count job twice). ".formatted" is deprecated; use String.format.
    val missingRate = missingCnt.map(m => "%.4f".format(m.toDouble / cnt))

    // Pair column names with their statistics and persist both reports.
    val result1 = sc.parallelize(columns.zip(missingCnt)).toDF("column_name", "missing_cnt")
    val result2 = sc.parallelize(columns.zip(missingRate)).toDF("column_name", "missing_rate")
    result1.show()
    saveCsv(result1, "data/air_data/missing_cnt")
    result2.show()
    saveCsv(result2, "data/air_data/missing_rate")

    // Drop rows where SUM_YR_1 or SUM_YR_2 is null (one call covers both
    // columns; the original chained two separate na.drop calls).
    val resNull1 = lines.na.drop(Array("SUM_YR_1", "SUM_YR_2"))
    resNull1.createTempView("List1")
    // BUG FIX: the original showed `resNull` (only SUM_YR_1 filtered),
    // not the fully filtered frame that was registered as the view.
    resNull1.show()

    // Keep records with at least one non-zero yearly fare, or with both
    // zero kilometres flown and zero average discount.
    val list1 = ss.sql(
      "select FFP_DATE, LOAD_TIME, LAST_TO_END, FLIGHT_COUNT, SEG_KM_SUM, avg_discount " +
        "from List1 " +
        "where (SUM_YR_1 != 0) or (SUM_YR_2 != 0) or (SEG_KM_SUM = 0 and avg_discount = 0)")
    list1.show()
    list1.createTempView("List2")
    saveCsv(list1, "data/air_data/sixData")

    // Derive the five LRFMC attributes:
    //   L = months between joining (FFP_DATE) and observation (LOAD_TIME),
    //   R = days since last flight, F = flight count,
    //   M = total kilometres flown, C = average discount rate.
    val list2 = ss.sql(
      """SELECT CAST(((cast(CAST(UNIX_TIMESTAMP(LOAD_TIME, 'yyyy/MM/dd') AS TIMESTAMP) as long)
        |            - cast(CAST(UNIX_TIMESTAMP(FFP_DATE, 'yyyy/MM/dd') AS TIMESTAMP) as long))
        |            / 86400 / 30) as decimal(38,2)) as L,
        |       LAST_TO_END as R,
        |       FLIGHT_COUNT as F,
        |       SEG_KM_SUM as M,
        |       CAST(AVG_DISCOUNT as decimal(38,2)) as C
        |from List2""".stripMargin)
    list2.show()
    saveCsv(list2, "data/air_data/fiveData")
  }
}