package 大数据应用赛_2020

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

object Exam1_1 {
  def main(args: Array[String]): Unit = {
    // Build the execution environment.
    val conf = new SparkConf()
      .setAppName("log_analysis")
      .setMaster("local[6]")
    val sc = new SparkContext(conf)
    // Reduce log noise.
    sc.setLogLevel("ERROR")
    val spark = SparkSession.builder().appName("exam_1").master("local[6]").getOrCreate()
    import org.apache.spark.sql.functions._
    import spark.implicits._

    // Explicit schema for the CSV so columns get predictable names/types.
    val schema = StructType(
      List(
        StructField("职位名称", StringType),
        StructField("薪水", StringType),
        StructField("工作年限", StringType),
        StructField("城市", StringType),
        StructField("公司简介", StringType),
        StructField("公司规模", StringType),
        StructField("所在区", StringType),
        StructField("融资阶段", StringType),
        StructField("所在领域", StringType),
        StructField("职位类型", StringType),
        StructField("简历日处理", IntegerType),
        StructField("简历处理率", IntegerType)
      )
    )

    // Load the data with the schema above; first line is a header.
    val df = spark.read
      .schema(schema)
      .option("header", value = true)
      .csv("G:\\Projects\\IdeaProjects\\Spark_Competition\\src\\main\\scala\\大数据应用赛_2020\\zhaopin.txt")
    df.show()

    // Add a monotonically increasing row index (ordered by city) so the
    // parsed salary rows can later be joined back to the full records.
    val window = Window.orderBy("城市")
    val df_index = df.withColumn("index", row_number().over(window))
    df_index.show()

    // Pull out only the salary string and the row index for parsing.
    val salary = df_index.select('薪水, 'index)
    salary.show()

    // Parse salary strings of the form "15k-25k" (optionally with a
    // "*N薪" multiplier suffix, e.g. "15k-25k*14薪") into numeric bounds.
    // Fields are read with typed Row accessors instead of parsing
    // Row.toString(), which breaks when a value contains ',' or brackets.
    val filter = salary
      .map { row =>
        val raw = Option(row.getString(0)).getOrElse("")
        val index = row.getInt(1)
        // Strip any "*N薪" multiplier suffix before parsing the range.
        val range = raw.split("\\*")(0)
        if (range.contains("-")) {
          // Drop the trailing unit character ('k') from each bound.
          val parts = range.split("-")
          val min = parts(0).trim.dropRight(1).toInt
          val max = parts(1).trim.dropRight(1).toInt
          (min, max, index)
        } else {
          // Unparseable salary (no range): sentinel bounds.
          // NOTE: the original computed split("-")(1) unconditionally and
          // would throw before ever reaching this fallback branch.
          (-1, -1, index)
        }
      }
      .toDF("min_salary", "max_salary", "index")
    filter.show(100)

    // Join the parsed bounds back onto the full records and persist:
    //    val full_data = df_index.join(filter, df_index.col("index") === filter.col("index"), "left_outer")
    //      .select('职位名称 as "positionName", '薪水 as "salary", 'min_salary, 'max_salary, '工作年限 as "workYear",
    //        '城市 as "city", '公司简介 as "companyShortName", '公司规模 as "companySize", '所在区 as "district", '融资阶段 as "financeStage",
    //        '所在领域 as "industryField", '职位类型 as "thirdType", '简历日处理 as "resumeProcessDay", '简历处理率 as "resumeProcessRate")
    //    full_data.show(20)
    //    full_data.write
    //      .option("header", "true")
    //      .csv("G:\\Projects\\IdeaProjects\\Spark_Competition\\src\\main\\scala\\大数据应用赛_2020\\zhaopin")

    // Release Spark resources.
    spark.stop()
  }
}
