
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.Encoder
import spark.implicits._
/*步骤一*/

val rawRDD = spark.sparkContext.textFile("file:///usr/local/spark/scaladata/scala/employee_data.txt")


/*步骤二*/

case class Employee(ID: Int, name: String, age: Int, gender: String, joinDate: String)
val rawTextRDD = rawRDD.zipWithIndex().filter(_._2 > 0).map(_._1).map(_.split(",")).map(x => Employee(x(0).toInt, x(1), x(2).toInt, x(3), x(4)))
val employeeDF = rawTextRDD.toDF("ID", "name", "age", "gender", "joinDate")
employeeDF.printSchema()
employeeDF.show(5)


/*步骤三*/

import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.Encoder
import spark.implicits._
val rawRDD = spark.sparkContext.textFile("file:///usr/local/spark/scaladata/scala/employee_data.txt")
case class Employee(ID: Int, name: String, age: Int, gender: String, joinDate: String)
val rawTextRDD = rawRDD.zipWithIndex().filter(_._2 > 0).map(_._1).map(_.split(",")).map(x => Employee(x(0).toInt, x(1), x(2).toInt, x(3), x(4)))
val employeeDF = rawTextRDD.toDF("ID", "name", "age", "gender", "joinDate")
employeeDF.printSchema()
employeeDF.show(5)

import org.apache.spark.sql.functions._
val dataFormattedDF = employeeDF.withColumn("joinData",to_date($"joinDate","yyyy-MM-dd"))

val avgAge = dataFormattedDF.agg(avg($"age")).head.getAs[Double](0)
print(s"公司员司员工的平均年龄：$avgAge")

val avgAgeByGender = dataFormattedDF.groupBy("gender").count()
avgAgeByGender.show()

val femaleCount = dataFormattedDF.filter($"gender" === "Female").count()
print(s"公司员司女性员工人数：$femaleCount")








