package to_hbase


import com.alibaba.fastjson.JSON
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.JavaConverters.asScalaSetConverter
import scala.collection.mutable
import common.Utils
object Map04_Source {

  /**
   * Spark job: reads `InternationalData.Sources.json` (one JSON object per line),
   * flattens each object into (column -> value) string pairs, and bulk-writes
   * one HBase `Put` per record into the "detail" table.
   *
   * Row key:        "<WebCode>_<SourceCode>"
   * Column family:  "source"
   * Qualifiers:     WebCode, WebName, SourceCode, SourceName
   *
   * Fixes over the previous version:
   *  - records missing an optional column (WebName/SourceName) no longer crash
   *    with NoSuchElementException; the column is simply skipped
   *  - a null from fastjson's getString (non-string / absent value) no longer
   *    reaches Bytes.toBytes (which would NPE)
   *  - missing rowkey fields fail fast with a message naming the bad line
   *  - the row key bytes are carried in the ImmutableBytesWritable for clarity
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("Map04").setMaster("local[*]")
    val sc = new SparkContext(conf)
    val inpath = "file:///Users/kelisiduofu/IdeaProjects/zhiwang_file/makeDict/map04/InternationalData.Sources.json"
    val lines = sc.textFile(inpath)
    val table_name = "detail" // DetailMapInfos
    val job: Job = Utils.get_job(sc, table_name)

    // Fixed schema for this table: one family, four known qualifiers.
    val family = "source"
    val columns = Seq("WebCode", "WebName", "SourceCode", "SourceName")

    val rdd: RDD[(ImmutableBytesWritable, Put)] = lines.map { line =>
      val obj = JSON.parseObject(line)
      // Flatten into an immutable map, dropping null values so that
      // Bytes.toBytes below can never receive null.
      val fields: Map[String, String] = obj.keySet().asScala
        .flatMap(k => Option(obj.getString(k)).map(k -> _))
        .toMap
      // Both rowkey components are mandatory; fail with a clear message
      // instead of an anonymous NoSuchElementException.
      val webCode = fields.getOrElse("WebCode", sys.error(s"missing WebCode in line: $line"))
      val sourceCode = fields.getOrElse("SourceCode", sys.error(s"missing SourceCode in line: $line"))
      val rowKey = webCode + "_" + sourceCode
      val put = new Put(Bytes.toBytes(rowKey))
      // Write only the columns actually present in this record.
      columns.foreach { col =>
        fields.get(col).foreach { value =>
          put.addColumn(Bytes.toBytes(family), Bytes.toBytes(col), Bytes.toBytes(value))
        }
      }
      // TableOutputFormat only consumes the Put, but carrying the key in the
      // writable makes debugging/inspection of the pair RDD easier.
      (new ImmutableBytesWritable(Bytes.toBytes(rowKey)), put)
    }
    // Single bulk write of the whole RDD — no per-record open/close needed.
    rdd.saveAsNewAPIHadoopDataset(job.getConfiguration)
    sc.stop()
  }
}

