package com.doit.crawler.rule

import org.apache.spark.sql.SparkSession
import org.elasticsearch.spark.rdd.EsSpark
import org.elasticsearch.spark.sql.EsSparkSQL

/**
  * Created by hunter.coder 涛哥
  * 2019/4/23 10:33
  * Contact QQ: 657270652
  * Version: 1.0
  * More learning material: https://blog.csdn.net/coderblack/
  * Description: processes page content harvested by the crawler and writes it
  * into the Elasticsearch knowledge-base index.
  **/

case class ContentBean(url: String, cat1: String, cat2: String, cat3: String, kwds: String)

/**
  * Spark job: loads crawled page-content JSON, normalizes it into
  * [[ContentBean]] records, de-duplicates them, and bulk-writes the result
  * into an Elasticsearch index via the elasticsearch-hadoop connector.
  *
  * Usage: Content2ES [inputPath]
  *   inputPath — optional; path to the crawler output JSON file.
  */
object Content2ES {

  def main(args: Array[String]): Unit = {

    // Input path for the crawled page-content data.
    // BUG FIX: the original checked `args.length > 1`, which silently ignored
    // a single command-line argument; `>= 1` accepts it.
    val logInPath =
      if (args.length >= 1) args(0)
      else "G:\\data_shark\\testdata\\jd_phone_content\\2019-04-15.dat"

    // Target ES resource in "index/type" form.
    val outIndex = "url_content/phone"

    val spark = SparkSession.builder()
      .appName("Content2ES") // was empty; a real name aids the Spark UI/history server
      .config("es.nodes", "c701:9200") // ES cluster node(s)
      .config("pushdown", "true") // push query filters down to ES so only needed data is returned
      .config("es.index.auto.create", "false") // do not auto-create a missing index
      .config("es.nodes.wan.only", "true") // allow access through a hostname/WAN address
      .master("local")
      .getOrCreate()

    // Load the crawler output as a DataFrame (one JSON object per line).
    val df = spark.read.json(logInPath)

    import spark.implicits._

    // Project the raw rows into typed ContentBean records.
    val ds = df.rdd.map(row => {
      ContentBean(
        row.getAs[String]("url")
        , row.getAs[String]("cat1")
        , row.getAs[String]("cat2")
        , row.getAs[String]("cat3")
        , row.getAs[String]("kwds")
      )
    }).toDS()

    ds.show(10, false)

    // Drop exact duplicate records before indexing.
    val ds2 = ds.distinct()

    // Write the Dataset to Elasticsearch with the es-spark integration.
    // (For a plain RDD one would use EsSpark.saveToEs(rdd, index) instead.)
    EsSparkSQL.saveToEs(ds2, outIndex)

    spark.close()

  }

}
