package cn.tecnova.cd

import cn.tecnova.utils.ConfigHandler
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{Dataset, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.elasticsearch.spark.rdd.EsSpark

/**
  * Spark batch job that exports the `qichacha_employees` MySQL table into
  * Elasticsearch. 55036927 (the "5503 6927" noted here originally) is the
  * maximum `id`, used as the upper bound for the partitioned JDBC read.
  **/
object TecnovaEmployees {
  // Silence Spark's verbose INFO logging for the whole job.
  Logger.getLogger("org").setLevel(Level.ERROR)

  // MySQL source. Host/table are hard-wired; credentials come from ConfigHandler.props2.
  private val JdbcUrl     = "jdbc:mysql://www.slave4.com:3306/tec_employees"
  private val SourceTable = "qichacha_employees"

  // Partitioning of the JDBC read over the numeric primary key `id`.
  // 55036927 is the known max id (the "5503 6927" note in the file header).
  private val PartitionColumn  = "id"
  private val LowerBound: Long = 1L
  private val UpperBound: Long = 55036927L
  private val NumPartitions    = 10

  // Target ES index/type; `es.mapping.id` below makes re-runs idempotent per row id.
  private val EsResource = "tecnova_employees/company_employees"

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      //      .setMaster("local[*]")  // enable only for local debugging
      .set("es.index.auto.create", "true")
      .set("es.nodes", ConfigHandler.esNodes)
      .set("es.port", ConfigHandler.esPort)
      .set("es.nodes.wan.only", "true")

    val sc = new SparkContext(conf)
    // NOTE(review): SQLContext is deprecated in Spark 2.x — consider migrating
    // to SparkSession when the file's imports are next touched.
    val sqlContext = new SQLContext(sc)

    // Provides the Encoder needed for Dataset[TecnovaEmployees] below.
    import sqlContext.implicits._

    // Parallel JDBC read: NumPartitions range scans over `id`.
    val rowDF = sqlContext.read.jdbc(
      JdbcUrl, SourceTable, PartitionColumn, LowerBound, UpperBound, NumPartitions,
      ConfigHandler.props2)

    val result: Dataset[TecnovaEmployees] = rowDF.map { row =>
      TecnovaEmployees(
        // getAs[Int] assumes the MySQL `id` column is INT (max id 55,036,927
        // fits) — verify the column is not BIGINT, or this cast will fail.
        row.getAs[Int]("id"),
        row.getAs[String]("KeyNo"),
        row.getAs[Int]("No"),
        row.getAs[String]("Name"),
        row.getAs[String]("Job"),
        row.getAs[String]("CerNo"),
        row.getAs[String]("ScertName")
      )
    }

    try {
      // Write each row as an ES document keyed by `id` (upsert semantics).
      EsSpark.saveToEs(result.rdd, EsResource, Map("es.mapping.id" -> "id"))
    } finally {
      // Release the Spark context even if the ES write fails.
      sc.stop()
    }
  }

}

/**
  * One row of the `qichacha_employees` table, indexed into Elasticsearch.
  * Fields are immutable (`val`): the value is only ever constructed from a
  * JDBC row and serialized as an ES document, so mutable `var` fields were
  * an anti-pattern here.
  *
  * @param id        primary key of the source row; also the ES document id
  *                  (via `es.mapping.id` -> "id")
  * @param keyNo     source column `KeyNo`
  * @param no        source column `No`
  * @param name      source column `Name`
  * @param job       source column `Job`
  * @param cerNo     source column `CerNo`
  * @param scertName source column `ScertName`
  */
final case class TecnovaEmployees(
    id: Long,
    keyNo: String,
    no: Int,
    name: String,
    job: String,
    cerNo: String,
    scertName: String
)
