import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

/**
  * @author o2o-rd-0008
  * @since  2019/11/9 09:41
  * @note   Output/target path pattern: s"s3a://dws-data/g_shop/${year}/${month}/${platform}"
  */
object i {

  /**
    * Entry point. Reads a local CSV of Yantai shops, pulls matching documents
    * from the Elasticsearch index "2019_tmall" (filtered to the province/city
    * values in the embedded query), inner-joins the two on shopId, drops
    * duplicate userIds, renames userId to anchorId, and writes the result as
    * a single header-ed CSV to D:\anchor_2019.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setAppName(s"${this.getClass.getSimpleName}")
    conf.set("spark.debug.maxToStringFields", "500")
    conf.setMaster("local[*]")
    conf.set("es.nodes", "192.168.1.157")
    conf.set("es.port", "9200")
    conf.set("cluster.name", "O2OElastic")
    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    conf.set("spark.sql.caseSensitive", "true")
    val sc = new SparkContext(conf)
    // Ensure the SparkContext is released even if the job throws (fix: the
    // original never called sc.stop()).
    try {
      val sqlContext = new SQLContext(sc)
      sc.setLogLevel("WARN")
      // SECURITY NOTE(review): object-storage access/secret keys are
      // hard-coded in source. They should be injected via configuration or
      // environment variables, and these keys rotated.
      sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
      sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
      sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
      import org.elasticsearch.spark._

      // Local shop list; the header row supplies column names (the join below
      // assumes it includes a shopId column).
      val yt_shop = sqlContext.read.option("header", "true").csv("D:\\yantai.csv")
      yt_shop.createOrReplaceTempView("yt_shop")
      println(yt_shop.count())

      // Fetch matching ES documents as raw JSON strings; .values drops the
      // document ids and keeps only the JSON payloads.
      val values: RDD[String] = sc.esJsonRDD(s"2019_tmall",
        """
          |{
          |  "query": {
          |    "bool": {
          |      "must": [
          |        {
          |          "term": {
          |            "province.keyword": {
          |              "value": "山东省"
          |            }
          |          }
          |        },{
          |          "term": {
          |            "city.keyword": {
          |              "value": "烟台市"
          |            }
          |          }
          |        }
          |      ]
          |    }
          |  }
          |}
        """.stripMargin).values

      sqlContext.read.json(values).createOrReplaceTempView("source_data")

      // Inner join keeps only shops present in both sources; dedupe on userId
      // so each anchor appears once in the output.
      val result = sqlContext.sql(
        s"""
           |select
           |t1.*,
           |t2.userId
           |from yt_shop t1
           |inner join
           |source_data t2
           |on t1.shopId=t2.shopId
         """.stripMargin).dropDuplicates("userId").withColumnRenamed("userId", "anchorId")

      // repartition(1) forces a single CSV part file in the output directory.
      result.repartition(1).write.option("header", "true").csv("D:\\anchor_2019")
    } finally {
      sc.stop()
    }
  }

  /**
    * Returns true when the string consists of exactly two digit groups
    * separated by a literal dot, e.g. "12.34".
    *
    * Fix: the dot in the original pattern was unescaped, so it matched ANY
    * character ("12a34" incorrectly returned true).
    *
    * NOTE(review): despite the name, this accepts decimal-looking strings,
    * not plain integers — confirm the intended contract with callers.
    *
    * @param s candidate string (must be non-null)
    * @return true iff s matches digits '.' digits
    */
  def isIntByRegex(s: String): Boolean = {
    val pattern = """^(\d+)\.(\d+)$""".r
    s match {
      case pattern(_*) => true
      case _ => false
    }
  }
}
