package 项目三数据预处理

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.{SparkConf, SparkContext}


/**
 * @author 35192
 * @date 2021-06-08 16:26
 */
object partOne {

  // Spark context is created eagerly at object initialisation so the log
  // level is lowered before any job runs.
  // NOTE(review): the appName here ("log_analysis") differs from the
  // SparkSession built in main ("HDFS_sql"); getOrCreate() reuses this
  // context, so the running application keeps THIS name — confirm intended.
  val conf = new SparkConf()
    .setAppName("log_analysis")
    .setMaster("local[6]")
  val sc = new SparkContext(conf)
  // Reduce console noise to errors only.
  sc.setLogLevel("ERROR")

  /**
   * Recruitment-data preprocessing pipeline.
   *
   * Reads the raw job-posting CSV with an explicit schema, deduplicates it,
   * drops records with a missing position name, attaches a sequential index,
   * parses the salary range ("..k-..k", optionally "..k-..k*N") into numeric
   * min/max columns, joins the parsed salaries back, filters out incomplete
   * records, and writes the cleaned dataset to local disk and HDFS.
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder().appName("HDFS_sql").master("local[6]").getOrCreate()
    import org.apache.spark.sql.functions._
    import spark.implicits._

    // Explicit schema: every column is a string except the two resume
    // processing metrics, which are integers. Field names are kept in
    // Chinese because the column references below ('职位名称 etc.) and the
    // source file depend on them.
    val schema = StructType(
      List(
        StructField("职位名称", StringType),    // position name
        StructField("薪水", StringType),        // salary range, e.g. "15k-25k"
        StructField("工作年限", StringType),    // required work experience
        StructField("城市", StringType),        // city
        StructField("公司简介", StringType),    // company short name
        StructField("公司规模", StringType),    // company size
        StructField("所在区", StringType),      // district
        StructField("融资阶段", StringType),    // finance stage
        StructField("所在领域", StringType),    // industry field
        StructField("职位类型", StringType),    // position type
        StructField("简历日处理", IntegerType), // resumes processed per day
        StructField("简历处理率", IntegerType)  // resume processing rate
      )
    )

    // Load the raw file; the first line is a header and is skipped because
    // the schema is supplied explicitly.
    val df = spark.read
      .schema(schema)
      .option("header", value = true)
      // Alternative HDFS source kept for reference:
      // .csv("hdfs://192.168.64.178:9000/spark/zhaopin.txt")
      .csv("G:\\Projects\\IdeaProjects\\Spark_Competition\\src\\main\\scala\\大数据应用赛_2020\\zhaopin1.txt")
    df.show()

    // Remove exact duplicate rows.
    val df_ = df.distinct()

    df_.coalesce(1)
      .write.mode("overwrite")
      .option("header", "true")
      .csv("C:\\Users\\35192\\Desktop\\df_")

    // Keep only records whose position name is present. The predicate drops
    // both SQL nulls (null =!= "null" evaluates to null, which where()
    // filters out) and the literal string "null".
    val df_index = df_.select('*)
      .where('职位名称 =!= "null")
    df_index.show()

    // Attach a sequential index column. A Window without partitioning pulls
    // everything into one partition — acceptable for this dataset size.
    val window = Window.orderBy("城市")
    val new_df = df_index.withColumn("index", row_number().over(window))
    new_df.show()

    new_df.write.mode("overwrite")
      .option("header", "true")
      .csv("C:\\Users\\35192\\Desktop\\new_df")

    // Salary column together with the row index, for the parsing step below.
    val salary = new_df.select('薪水, 'index)
    salary.show()
    salary.foreach(println(_))

    salary.write.mode("overwrite")
      .option("header", "true")
      .csv("C:\\Users\\35192\\Desktop\\salary")

    // Parse "..k-..k" salary strings into (min, max) integers.
    // FIX: the original parsed Row.toString() and split on '[' / ']' / ',',
    // which breaks whenever a salary value itself contains one of those
    // characters — read the Row fields directly instead.
    // FIX: the original computed split("-")(1) unconditionally, so any
    // salary without a dash (including null) threw
    // ArrayIndexOutOfBoundsException and the (-1, -1) fallback branch was
    // unreachable; the pieces are now computed only where they are valid.
    val filter = salary
      .map { row =>
        val raw = Option(row.getAs[String]("薪水")).getOrElse("")
        val idx = row.getAs[Int]("index").toString
        // Values like "10k-20k*15薪" carry a bonus multiplier after '*';
        // only the base range before it is the monthly salary.
        val range = if (raw.contains("*")) raw.split("\\*")(0) else raw
        if (range.contains("-")) {
          val lo = range.split("-")(0)
          val hi = range.split("-")(1)
          // Drop the trailing unit character ("k") before converting.
          val min = lo.substring(0, lo.length - 1).toInt
          val max = hi.substring(0, hi.length - 1).toInt
          (min, max, idx)
        } else {
          // Unparseable salary: sentinel -1 keeps the record joinable.
          (-1, -1, idx)
        }
      }
      .toDF("min_salary", "max_salary", "index")
    filter.show()

    filter.write.mode("overwrite")
      .option("header", "true")
      .csv("C:\\Users\\35192\\Desktop\\filter")

    // Join the parsed salary bounds back onto the indexed records, rename
    // the columns to English, and drop records with missing fields.
    val full_data = new_df
      .join(filter, new_df.col("index") === filter.col("index"), "left_outer")
      .select('职位名称 as "positionName",
        '薪水 as "salary", 'min_salary, 'max_salary,
        '工作年限 as "workYear", '城市 as "city",
        '公司简介 as "companyShortName", '公司规模 as "companySize", '所在区 as "district",
        '融资阶段 as "financeStage", '所在领域 as "industryField", '职位类型 as "thirdType",
        '简历日处理 as "resumeProcessDay", '简历处理率 as "resumeProcessRate")
      .where('positionName =!= "null")
      .where('workYear =!= "null")
      .where('city =!= "null")
      .where('companyShortName =!= "null")
      .where('companySize =!= "null")
      .where('district =!= "null")
      .where('financeStage =!= "null")
      .where('thirdType =!= "null")
      .where('industryField =!= "null")
      // NOTE(review): resumeProcessDay is IntegerType, so comparing against
      // NaN is always true for non-null values; the net effect is a
      // null-filter ('resumeProcessDay.isNotNull would be clearer) — kept
      // byte-identical to preserve behavior.
      .where('resumeProcessDay =!= Double.NaN)
    full_data.show()

    full_data.write.mode("overwrite")
      .option("header", "true")
      .csv("C:\\Users\\35192\\Desktop\\full_data")

    full_data.write
      .option("header", "true")
      .csv("hdfs://192.168.64.178:9000/spark/clean")

    // FIX: release the Spark session/context instead of relying on JVM exit.
    spark.stop()
  }
}
