import cn.tecnova.utils.ConfigHandler
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Description: loads a local JSON file into Spark and writes it to Elasticsearch.
  * Author: Rabcheng
  * Date: 2019/5/7 10:37
  **/
object InsertData2ES {

  /**
    * Reads a JSON file into a DataFrame and bulk-indexes it into Elasticsearch.
    *
    * Usage: InsertData2ES [jsonPath] [esResource]
    *   - args(0): optional path to the JSON input file
    *   - args(1): optional ES resource in "index/type" form
    * When no arguments are supplied, the original hard-coded path and resource
    * are used, so existing invocations keep working unchanged.
    */
  def main(args: Array[String]): Unit = {
    import org.elasticsearch.spark.sql._

    // Fall back to the historical hard-coded values when no args are given.
    val inputPath =
      if (args.length > 0) args(0) else "C:\\Users\\54671\\Desktop\\ttttttt.txt"
    val esResource =
      if (args.length > 1) args(1) else "ba_user_relation_article/ba_user_relation_article2"

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]")               // local mode; override via spark-submit for a cluster run
      .set("es.index.auto.create", "true") // create the target index if it does not exist
      .set("es.nodes", "192.168.100.3")
      .set("es.port", "9200")
      .set("es.nodes.wan.only", "true")    // talk only to the listed node; don't resolve internal data-node addresses

    val sc = new SparkContext(conf)
    try {
      // NOTE(review): SQLContext is deprecated in modern Spark — migrate to
      // SparkSession when the Spark version in use allows it.
      val sqlContext = new SQLContext(sc)

      val frame: DataFrame = sqlContext.read.json(inputPath)
      frame.saveToEs(esResource)
    } finally {
      sc.stop() // always release the Spark context, even if the job fails
    }
  }

}

/**
  * Flat record holding an article together with its analysis results
  * (word clouds, sentiment, risk score, etc.).
  *
  * NOTE(review): not referenced anywhere in this file — presumably it mirrors
  * the schema of the documents written to Elasticsearch by [[InsertData2ES]];
  * confirm against callers. The name `dddd` violates Scala naming conventions
  * (classes should be UpperCamelCase), but renaming it here could break
  * external references, so it is left as-is.
  */
case class dddd(id: String,
                articleId: String,
                siteName: String,
                mediaCls: String,
                articleTitle: String,
                articleAuthor: String,
                siteUrl: String,
                articleArea: String,
                articlePubdate: String, // presumably a formatted date string — verify format with producers
                hitWords: String,
                positiveWordCloud: String,
                negativeWordCloud: String,
                hotWords: String,
                sentimentAnalysis: String,
                summary: String,
                nlpSort: String,
                relationWords: String,
                riskScore: Int,
                riskReason: String,
                weightSort: Int,
                updateTime: String)
