package 大数据应用赛_2020

import org.apache.spark.sql
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.row_number

object Exam1 {

  /**
   * Parse a salary range string such as "15k-25k" or "15k-25k*13薪" into
   * (min, max) expressed in thousands (the trailing unit character is stripped).
   *
   * Any "*…" multiplier suffix is discarded before parsing. Returns (-1, -1)
   * when the string contains no "-" range separator. NOTE(review): assumes
   * each bound ends in exactly one unit character (e.g. 'k') — confirm against
   * the actual zhaopin.txt data.
   */
  private def parseSalary(raw: String): (Int, Int) = {
    // Drop the "*N薪" multiplier part if present; keep only the base range.
    val base = raw.split("\\*")(0)
    if (base.contains("-")) {
      val bounds = base.split("-")
      // Strip the trailing unit character from each bound before converting.
      val min = bounds(0).substring(0, bounds(0).length - 1).toInt
      val max = bounds(1).substring(0, bounds(1).length - 1).toInt
      (min, max)
    } else {
      // No range separator: signal "unparseable" with sentinel values.
      // (The original crashed here: it indexed split("-")(1) before this check.)
      (-1, -1)
    }
  }

  def main(args: Array[String]): Unit = {
    // Create a local Spark session (6 worker threads).
    val spark = sql.SparkSession.builder()
      .appName("test01")
      .master("local[6]")
      .getOrCreate()
    import spark.implicits._

    // Read the raw comma-separated job-posting file.
    val data = spark.sparkContext.textFile("dataset/zhaopin.txt")

    // Split each line ONCE into its 12 fields.
    // (The original called item.split(",") twelve times per line.)
    val data_use = data.map { line =>
      val f = line.split(",")
      (f(0), f(1), f(2), f(3), f(4), f(5), f(6), f(7), f(8), f(9), f(10), f(11))
    }

    // Convert to a DataFrame with named columns.
    val df = data_use.toDF("职位名称", "薪水", "工作年限", "城市", "公司简介", "公司规模", "所在区", "融资阶段", "所在领域", "职位类型", "简历日处理", "简历处理率")
    df.show()

    // Add a sequential "index" column, numbering rows ordered by city.
    val window = Window.orderBy("城市")
    val df_index = df.withColumn("index", row_number().over(window))
    df_index.show()

    // Keep only the salary string and the row index.
    val salary = df_index.select('薪水, 'index)
    salary.show()
    salary.foreach(println(_))

    // Parse each salary into (min, max, index) using typed Row access instead
    // of splitting Row.toString() on '[' / ']' / ',' (fragile if any field
    // representation contains those characters).
    val filter = salary
      .map { row =>
        val (min, max) = parseSalary(row.getString(0))
        (min, max, row.getInt(1))
      }
      .toDF("min_salary", "max_salary", "index")
    filter.show(100)

//    val full_data = df_index.join(filter, df_index.col("index") === filter.col("index"), "left_outer")
//      .select('职位名称, '薪水, 'min_salary, 'max_salary, '工作年限, '城市, '公司简介, '公司规模, '所在区, '融资阶段, '所在领域, '职位类型, '简历日处理, '简历处理率)
//    full_data.show(1000)
  }
}
