package to_hbase_xiemingyang

import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase._
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.HashMap
import scala.util.parsing.json.JSON

object makeDetailDic_step03 {
  // ZooKeeper quorum/port/znode-parent of the HBase cluster used by the helper
  // methods below. NOTE(review): the Spark jobs further down point at a
  // DIFFERENT quorum (10.120.x.x) — confirm which cluster each piece should hit.
  val hbase_zq = "10.170.129.35,10.170.129.36,10.170.129.37"
  val hbase_zqPort = "2181"
  val hBase_zParent = "/hbase"

  // Shared connection/admin, created once per JVM the first time this object is
  // touched (so also once per Spark executor JVM). Keeping these as object
  // fields is a module-level side effect that existing callers rely on.
  val configuration = HBaseConfiguration.create()
  this.configuration.set("hbase.zookeeper.quorum", this.hbase_zq)
  this.configuration.set("hbase.zookeeper.property.clientPort", this.hbase_zqPort)
  this.configuration.set("zookeeper.znode.parent", this.hBase_zParent) //some condition you should add;
  val connection = ConnectionFactory.createConnection(this.configuration)
  val admin = connection.getAdmin()

  /**
   * Returns a [[Table]] handle for `tablename`, first creating the table with
   * the given column families if it does not exist yet.
   *
   * The misspelled name ("inintTable") and parameter name ("cloumnFamilys")
   * are kept on purpose so external callers keep compiling.
   *
   * @param tablename     HBase table name
   * @param cloumnFamilys column families to create the table with (only used
   *                      when the table is missing)
   * @return an open Table handle — caller is responsible for closing it
   */
  def inintTable(tablename: String, cloumnFamilys: Array[String]): Table = {
    val tName = TableName.valueOf(tablename)
    if (!admin.tableExists(tName)) {
      val tDescrip: HTableDescriptor = new HTableDescriptor(tName)
      cloumnFamilys.foreach(cf => tDescrip.addFamily(new HColumnDescriptor(cf)))
      admin.createTable(tDescrip)
    }
    // Single exit: both branches of the original returned the same handle.
    connection.getTable(tName)
  }

  /**
   * Fetches every qualifier -> value pair of one column family from one row.
   *
   * @param tableName    table to read from
   * @param rowkey       row key to fetch
   * @param columnFamily column family whose cells are returned
   * @return an immutable map of qualifier -> value; empty when the row or
   *         family is absent (rawCells() yields an empty array in that case)
   */
  def getFamily_column_value(tableName: String, rowkey: String, columnFamily: String): Map[String, String] = {
    val table = this.connection.getTable(TableName.valueOf(tableName))
    try {
      val get: Get = new Get(Bytes.toBytes(rowkey))
      get.addFamily(Bytes.toBytes(columnFamily))
      table.get(get).rawCells().map { cell =>
        Bytes.toString(CellUtil.cloneQualifier(cell)) -> Bytes.toString(CellUtil.cloneValue(cell))
      }.toMap
    } finally {
      // BUGFIX: previously the handle leaked whenever table.get threw, because
      // close() was only reached on the success path.
      table.close() //bigdata.cnki.transferDetailDic_to_Map
    }
  }

  /**
   * Reads DetailDic.json (one JSON object per line) from HDFS and writes each
   * entry into the HBase table "DetailMapInfos".
   *
   * Row layout: rowkey = WebCode_SourceCode, family = ConceptId, qualifier =
   * either the remapped detail key built from BaseMapInfos/changeDetailId (a
   * ';'-separated column list joined with '_') or the raw DetailId, value =
   * DetailName. '/' is replaced with '_' in family/qualifier names.
   *
   * Lines that fail JSON parsing or whose WebCode is not whitelisted are
   * funnelled into the sentinel row "WHO_who" so the save never aborts.
   */
  def make_DetailDic(): Unit = {
    val sparkConf = new SparkConf().setAppName("HBaseTest").setMaster("local")
    val sc = new SparkContext(sparkConf)
    val table: String = "DetailMapInfos" // no need to wrap a literal in new String(...)
    sc.hadoopConfiguration.set("hbase.zookeeper.quorum", "10.120.65.181,10.120.67.164,10.120.64.243,10.120.64.144")
    sc.hadoopConfiguration.set("hbase.zookeeper.property.clientPort", "2181")
    sc.hadoopConfiguration.set(TableOutputFormat.OUTPUT_TABLE, table)

    val job = Job.getInstance(sc.hadoopConfiguration)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    job.setOutputValueClass(classOf[Result])
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])

    // Per-closure cache of BaseMapInfos lookups. Each Spark task deserializes
    // its own copy, so this only deduplicates lookups within a task (fine in
    // local mode; on a real cluster every task starts with an empty cache).
    val changinfoDicCache: HashMap[String, Map[String, String]] = new HashMap[String, Map[String, String]]()
    val inputFile = "hdfs://slave1:8020//InternationalData/DetailDic.json" // read the JSON file
    val jsonStr = sc.textFile(inputFile)

    val result = jsonStr.map(s => JSON.parseFull(s))

    val rdds = result.map {
      // @unchecked: value types are erased; every entry is stringified below anyway.
      case Some(maps: Map[String, Any] @unchecked) =>
        val thashmap: HashMap[String, String] = new HashMap[String, String]()
        maps.foreach { case (k, v) => thashmap += (k.toString -> v.toString) }
        val webcodes = List("WHO")
        if (webcodes.contains(thashmap("WebCode"))) {
          val rowkey = thashmap("WebCode") + "_" + thashmap("SourceCode")
          val put = new Put(Bytes.toBytes(rowkey))
          // Look up (and memoize) the ConceptId -> "col1;col2;..." remapping row.
          val data: Map[String, String] = changinfoDicCache.getOrElseUpdate(rowkey, {
            val fetched = getFamily_column_value("BaseMapInfos", rowkey, "changeDetailId")
            println(fetched)
            fetched
          })
          if (data.contains(thashmap("ConceptId"))) {
            // Remapped qualifier: join the values of the listed columns with '_'.
            val columns = data(thashmap("ConceptId")).split(";")
            val newDetail = columns.map(thashmap(_)).mkString("_")
            put.addColumn(Bytes.toBytes(thashmap("ConceptId").replace("/", "_")), Bytes.toBytes(newDetail.replace("/", "_")), Bytes.toBytes(thashmap("DetailName")))
          } else {
            // Plain layout: ConceptId family, DetailId qualifier, DetailName value.
            put.addColumn(Bytes.toBytes(thashmap("ConceptId").replace("/", "_")), Bytes.toBytes(thashmap("DetailId").replace("/", "_")), Bytes.toBytes(thashmap("DetailName")))
          }
          (new ImmutableBytesWritable, put)
        } else {
          // Non-whitelisted WebCode: write the sentinel marker row instead.
          val put = new Put(Bytes.toBytes("WHO_who"))
          put.addColumn(Bytes.toBytes("WebInfo"), Bytes.toBytes("a"), Bytes.toBytes("1"))
          (new ImmutableBytesWritable, put)
        }
      case _ =>
        // BUGFIX: JSON.parseFull returns None for malformed lines; the original
        // match was non-exhaustive and killed the whole job with a MatchError.
        // Route bad lines to the same sentinel row as unknown WebCodes.
        val put = new Put(Bytes.toBytes("WHO_who"))
        put.addColumn(Bytes.toBytes("WebInfo"), Bytes.toBytes("a"), Bytes.toBytes("1"))
        (new ImmutableBytesWritable, put)
    }
    rdds.saveAsNewAPIHadoopDataset(job.getConfiguration())

    sc.stop()
  }

  // NOTE(review, translated from the original Chinese comments): the code below
  // reportedly did not take effect. It processes the local (IDE resource) file
  // DetailNameList/..._DetailId_DetailName with tab-separated fields:
  //   WebCode  SourceCode  ConceptId  DetailId  DetailName
  // With exactly 5 columns: table "DetailMapInfos", rowkey = WebCode+"_"+SourceCode,
  // family = ConceptId, qualifier = DetailId, value = DetailName.
  // Otherwise: table "DetailMapInfos", rowkey "WHO_who", family "A", qualifier "a", value "a".
  // Observed: get 'DetailMapInfos','Area:108' and get 'DetailMapInfos','WebInfo:a'
  // both returned nothing — left for investigation.
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("HBaseTest").setMaster("local")
    val sc = new SparkContext(sparkConf)
    val table: String = "DetailMapInfos" // no need to wrap a literal in new String(...)
    sc.hadoopConfiguration.set("hbase.zookeeper.quorum", "10.120.65.181,10.120.67.164,10.120.64.243,10.120.64.144")
    sc.hadoopConfiguration.set("hbase.zookeeper.property.clientPort", "2181")
    sc.hadoopConfiguration.set(TableOutputFormat.OUTPUT_TABLE, table)

    val job = Job.getInstance(sc.hadoopConfiguration)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    job.setOutputValueClass(classOf[Result])
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])

    // Resolve the input CSV from the classpath (IDE resources directory).
    val thepath = makeConceptDic_step02.getClass.getClassLoader().getResource("DetailNameList/IBRD_DetailId_DetailName.csv").getPath
    //val thepath = makeConceptDic_step02.getClass.getClassLoader().getResource("test/imf/part-00000-f4a44378-d8ff-43ed-8efa-31500c98399a-c000.csv").getPath

    // Each line: WebCode \t SourceCode \t ConceptId \t DetailId \t DetailName
    val jsonStr = sc.textFile(thepath).map(_.split("\t"))
    //val WebCode = "IBRD"
    val rdds = jsonStr.map { data =>
      if (data.length == 5) {
        val rowkey = data(0) + "_" + data(1)
        val put = new Put(Bytes.toBytes(rowkey))
        // family = ConceptId, qualifier = DetailId, value = DetailName
        put.addColumn(Bytes.toBytes(data(2)), Bytes.toBytes(data(3)), Bytes.toBytes(data(4)))
        (new ImmutableBytesWritable, put)
      } else {
        // Malformed line: write a fixed marker row so the save never aborts.
        val put = new Put(Bytes.toBytes("IBRD_IBRD"))
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("a"), Bytes.toBytes("a"))
        (new ImmutableBytesWritable, put)
      }
    }
    rdds.saveAsNewAPIHadoopDataset(job.getConfiguration())

    sc.stop()
  }
}
