package com.lenovo.userprofile

import java.sql.DriverManager

import com.lenovo.function.Utils
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.SparkSession

object ETL_nature_mon_5 {

  /**
   * Monthly "nature" dimension ETL.
   *
   * Reads five rule tables (band / kpi / mec / rol / vip) over JDBC and writes
   * one cell per rule into the `nature` column family of the HBase table
   * `upp:upp_user_profile`, keyed by the lower-cased user name. Rows are only
   * written for users that appear in Hive table `ccsd.ad_user_upp`.
   */
  def main(args: Array[String]): Unit = {
    val util = new Utils
    // NOTE(review): appName ("ETL_offline_map_data") looks copied from another
    // job and does not match this object's name — confirm before relying on it
    // for monitoring/alerting.
    val sparkSession = SparkSession.builder.master("yarn").appName("ETL_offline_map_data").enableHiveSupport().getOrCreate()

    // HBase output configuration shared by every saveAsHadoopDataset call below.
    val hbaseConf = HBaseConfiguration.create()
    val jobConf = new JobConf(hbaseConf)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, "upp:upp_user_profile")

    // A cell is usable when it is non-empty and not the literal string "null"
    // (case-insensitive). Declared as a function value so the Spark closures
    // referencing it stay serializable. Unlike the previous inline checks,
    // this also tolerates actual null cells (rendered as "null" and rejected)
    // instead of throwing a NullPointerException.
    val isUsable: Any => Boolean = v => {
      val s = v + ""
      s.nonEmpty && !"null".equals(s.toLowerCase)
    }

    // Whitelist of known users, collected to the driver once. A Set gives O(1)
    // membership tests; the original List.contains was O(n) per RDD row.
    val allUsers: Set[String] = sparkSession.sql("select user_name from ccsd.ad_user_upp")
      .rdd
      .map(_(0))
      .filter(isUsable)
      .map(_.toString)
      .collect()
      .toSet

    // Loads one rule table from the shared JDBC source configured in Utils.
    def readRuleTable(table: String): DataFrame =
      sparkSession.read.format("jdbc").options(Map(
        "url" -> util.url,
        "driver" -> util.driver,
        "dbtable" -> table,
        "user" -> util.user,
        "password" -> util.password)).load()

    // Writes one `nature:<qualifier>` cell per qualifying row of `df`.
    // The HBase row key is the lower-cased value at `keyIndex`; a row is kept
    // only when that value is usable and column 0 names a whitelisted user.
    def writeNatureColumn(df: DataFrame, keyIndex: Int, qualifier: String)(cellValue: Row => String): Unit =
      df.rdd
        .filter(row => isUsable(row(keyIndex)) && allUsers.contains(row(0).toString))
        .map { row =>
          val put = new Put(Bytes.toBytes(row(keyIndex).toString.toLowerCase))
          put.addColumn(Bytes.toBytes("nature"), Bytes.toBytes(qualifier), Bytes.toBytes(cellValue(row)))
          (new ImmutableBytesWritable, put)
        }
        .saveAsHadoopDataset(jobConf)

    // nature:band — band value taken from column 1.
    val bandDf = readRuleTable("tb_feature_rule_band")
    bandDf.show()
    writeNatureColumn(bandDf, 0, "band")(row => row(1) + "")

    // nature:kpi_keyword — KPI keyword taken from column 1.
    val kpiDf = readRuleTable("tb_feature_rule_kpi")
    kpiDf.show()
    writeNatureColumn(kpiDf, 0, "kpi_keyword")(row => row(1) + "")

    // nature:is_monthly_user — constant "Y" flag for monthly (MEC) users.
    val mecDf = readRuleTable("tb_feature_rule_mec")
    mecDf.show()
    writeNatureColumn(mecDf, 0, "is_monthly_user")(_ => "Y")

    // nature:is_rol_user — constant "Y" flag for ROL users.
    val rolDf = readRuleTable("tb_feature_rule_rol")
    rolDf.show()
    writeNatureColumn(rolDf, 0, "is_rol_user")(_ => "Y")

    // nature:vip — three writes from the same table: the VIP themselves
    // (keyed by column 0)...
    val vipDf = readRuleTable("tb_feature_rule_vip")
    vipDf.show()
    writeNatureColumn(vipDf, 0, "vip")(_ => "VIP")

    // ...their secretary (keyed by column 5, labelled with the VIP's name)...
    writeNatureColumn(vipDf, 5, "vip")(row => row(0).toString + "'s Secretary")

    // ...and their EA (keyed by column 7).
    writeNatureColumn(vipDf, 7, "vip")(row => row(0).toString + "'s EA")

    // Release YARN resources; the original job never stopped the session.
    sparkSession.stop()
  }
}
