package com.spark.hbase

import java.text.SimpleDateFormat

import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.hbase.client.{ConnectionFactory, Put}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.util.Try

/**
  * Created by hjn on 2020.4.30
  * Reads two CSV files, joins user location records with base-station
  * coordinates on the LAC-CI key, and inserts the joined rows into HBase.
  */
object Hbase_Insert {

  /**
    * Writes one Spark partition of joined records into the HBase table "fwwb".
    *
    * Each record is (laci, ((imsi, timestamp), (longitude, latitude))).
    * Records are kept only when the IMSI is non-empty and free of the masking
    * characters *, ^, # and the formatted timestamp falls on 2018-10-03; the
    * formatted timestamp is used as both the row key and the cell timestamp.
    *
    * @param records one partition of joined (user, base-station) rows
    */
  def func(records: Iterator[(String, ((String, String), (String, String)))]): Unit = {
    val conf = HBaseConfiguration.create()
    val connection = ConnectionFactory.createConnection(conf)
    val tableName = TableName.valueOf("fwwb")
    // SimpleDateFormat is not thread-safe, but one partition is processed by a
    // single thread, so creating it once here (instead of per record) is safe
    // and avoids a per-row allocation.
    val sdf = new SimpleDateFormat("yyyyMMddHHmmss")
    try {
      // Open the table once per partition. The original opened a new Table for
      // every record inside the loop and never closed it on success — a leak.
      val table = connection.getTable(tableName)
      try {
        records.foreach { case (laci, ((imsi, timestamp), (lon, lat))) =>
          val date = sdf.format(timestamp.toLong)
          val longitude = lon.toDouble.formatted("%.8f")
          val latitude = lat.toDouble.formatted("%.8f")

          val imsiOk = StringUtils.isNotEmpty(imsi) &&
            !imsi.contains("*") && !imsi.contains("^") && !imsi.contains("#")

          if (imsiOk && StringUtils.isNotEmpty(laci) && date.contains("20181003")) {
            // Row key and cell timestamp are both the second-resolution date
            // string. NOTE(review): two records in the same second share a row
            // key and overwrite each other — consider date + imsi + laci as
            // the key if each observation must be preserved.
            val put = new Put(Bytes.toBytes(date), date.toLong)
            put.add(Bytes.toBytes("family"), Bytes.toBytes("longitude"), Bytes.toBytes(longitude))
            put.add(Bytes.toBytes("family"), Bytes.toBytes("latitude"), Bytes.toBytes(latitude))
            put.add(Bytes.toBytes("family"), Bytes.toBytes("laci"), Bytes.toBytes(laci))
            put.add(Bytes.toBytes("family"), Bytes.toBytes("imsi"), Bytes.toBytes(imsi))
            // Let a failed put propagate to the catch below. The original
            // wrapped it in Try(...).getOrElse(table.close()), which silently
            // swallowed every error and closed the table only on failure.
            table.put(put)

            printf("insert into fwwb (date, imsi, laci, longitude, latitude) values (%s,%s,%s,%s,%s)", date, imsi, laci, longitude, latitude)
            println()
            println()
          }
        }
      } finally {
        table.close()
      }
    } catch {
      case e: Exception => e.printStackTrace()
    } finally {
      if (connection != null) connection.close()
    }
  }

  /**
    * Entry point: reads the user and base-station CSVs, joins them on the
    * LAC-CI key, sorts by timestamp, and writes the result to HBase via
    * [[func]].
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("Hbase_Insert").setMaster("local[*]")
    //    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(conf)
    sc.setLogLevel("ERROR")

    // 1. Read the input files.
    val user = sc.textFile("file:///home/hadoop/Desktop/test_scala/data/data.csv")
    val base = sc.textFile("file:///home/hadoop/Desktop/test_scala/data/base.csv")

    // data.csv columns: timestamp, imsi, lac_id, cell_id
    // Keyed directly by "lac_id-cell_id" for the join (the original built an
    // intermediate ((imsi, laci), timestamp) RDD and immediately re-keyed it).
    val userData = user.map { line =>
      val fields = line.split(",")
      val laci = fields(2) + "-" + fields(3)
      (laci, (fields(1), fields(0))) // (laci, (imsi, timestamp))
    }

    // base.csv columns: x, y, laci
    val baseData = base.map { line =>
      val fields = line.split(",")
      (fields(2), (fields(0), fields(1))) // (laci, (x, y))
    }

    // Join on the base-station id, then order globally by timestamp.
    val joined: RDD[(String, ((String, String), (String, String)))] =
      userData.join(baseData).sortBy(_._2._1._2, ascending = true)

    // coalesce(1) merges partitions without a shuffle, preserving the sort
    // order; the original repartition(1) performed a full shuffle and could
    // destroy the ordering just established by sortBy.
    joined.coalesce(1).foreachPartition(func)
    sc.stop()
  }
}