package cn.doitedu

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.elasticsearch.spark.sql.EsSparkSQL
/**
 * @Author: 深似海
 * @Site: <a href="www.51doit.com">多易教育</a>
 * @QQ: 657270652
 * @Date: 2024/4/30
 * @Desc: 学大数据，上多易教育
 *
 *   1000万条数据，导入es，大约20多分钟
 *
 **/
/**
 * Batch job: assembles the per-user profile tags from four Hive staging tables
 * (tmp.user_profile_01 .. 04) for a given partition date, full-outer-joins them
 * on `guid`, and bulk-writes the resulting wide row into Elasticsearch.
 *
 * Expected arguments:
 *   args(0) — target ES resource, e.g. "index/type" or "index"
 *   args(1) — Hive partition date (dt) to export
 */
object Job04_UserProfileTags2Es {

  def main(args: Array[String]): Unit = {

    // Fail fast with a usage message instead of an opaque
    // ArrayIndexOutOfBoundsException when an argument is missing.
    require(
      args.length >= 2,
      "Usage: Job04_UserProfileTags2Es <esResource> <dt>  (e.g. user_profile/_doc 2024-04-30)"
    )
    val esResource = args(0)
    // NOTE(review): dt is interpolated directly into SQL below. spark.sql has no
    // bind parameters here, so validate/sanitize dt upstream if args can come
    // from an untrusted source.
    val dt = args(1)

    // elasticsearch-hadoop connector settings; es.nodes.wan.only=true makes the
    // connector talk only to the configured node (required when the cluster's
    // internal addresses are not routable from the Spark executors).
    val conf = new SparkConf()
    conf.set("es.index.auto.create", "true")
    conf.set("es.nodes", "doitedu")
      .set("es.port", "9200")
      .set("es.nodes.wan.only", "true")

    val spark = SparkSession.builder()
      .appName("画像数据导入es")
      .config(conf)
      // Output volume is modest; a single shuffle partition keeps the ES bulk
      // writes from fanning out into many tiny batches.
      .config("spark.sql.shuffle.partitions", 1)
      //.master("local")
      .enableHiveSupport()
      .getOrCreate()

    // Build the wide profile row: collect the distinct guid universe across all
    // four tag tables, then left-join each table back so a user missing from a
    // table still appears (with null tags for that group).
    val df = spark.sql(
      s"""
        |
        |with t01 as (
        |select
        |*
        |from tmp.user_profile_01
        |where dt='$dt'
        |)
        |,
        |t02 as (
        |select
        |*
        |from tmp.user_profile_02
        |where dt='$dt'
        |)
        |,
        |
        |t03 as (
        |select
        |*
        |from tmp.user_profile_03
        |where dt='$dt'
        |)
        |,
        |t04 as (
        |select
        |*
        |from tmp.user_profile_04
        |where dt='$dt'
        |)
        |
        |select
        |ids.guid
        |,t01.tag_01_01
        |,t01.tag_01_02
        |,t01.tag_01_03
        |,t01.tag_01_04
        |
        |,t02.tag_02_01
        |,t02.tag_02_02
        |,t02.tag_02_03
        |,t02.tag_02_04
        |
        |,t03.tag_03_01
        |,t03.tag_03_02
        |,t03.tag_03_03
        |,t03.tag_03_04
        |
        |,t04.tag_04_01
        |,t04.tag_04_02
        |,t04.tag_04_03
        |,t04.tag_04_04
        |
        |from (
        |select
        |    guid
        |from t01
        |group by guid
        |union
        |select
        |    guid
        |from t02
        |group by guid
        |union
        |select
        |    guid
        |from t03
        |group by guid
        |union
        |select
        |    guid
        |from t04
        |group by guid
        |) ids
        |left join t01 on ids.guid = t01.guid
        |left join t02 on ids.guid = t02.guid
        |left join t03 on ids.guid = t03.guid
        |left join t04 on ids.guid = t04.guid
        |
        |""".stripMargin
    )

    // Bulk-write the assembled DataFrame to ES via the es-spark integration.
    // es.mapping.id = guid makes the write idempotent: re-running the job for
    // the same dt upserts documents instead of duplicating them.
    val config = Map("es.mapping.id" -> "guid")
    EsSparkSQL.saveToEs(df, esResource, config)

    spark.stop()
  }
}
