package com.lenovo.userprofile

import com.lenovo.function.Utils
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.sql.SparkSession

/**
  * ETL job: reads (itcode, asset name) pairs from Hive, aggregates all asset
  * names per user into a comma-separated string, and writes each result to the
  * "nature:asset_name" column of the HBase table configured in [[Utils]].
  */
object ETL_nature_mon_2 {
  def main(args: Array[String]): Unit = {
    val util = new Utils
    val sparkSession = SparkSession.builder.master("yarn").appName("ETL_nature_mon_2").enableHiveSupport().getOrCreate()

    // HBase output configuration: emit Puts into util.tablename via the mapred API.
    val hbase_conf = HBaseConfiguration.create()
    val jobConf = new JobConf(hbase_conf)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, util.tablename)

    // Asset rows: (lower-cased itcode, asset name), restricted to users that
    // also exist in ccsd.ad_user_upp; blank/'null' itcodes filtered in SQL.
    val asset_df = sparkSession.sql("SELECT LOWER(t1.itcode), t1.assetname FROM ( SELECT LOWER(itcode) itcode,assetname FROM ccsd.hr_asset WHERE itcode IS NOT NULL AND itcode != '' AND LOWER(itcode) != 'null') t1 JOIN ccsd.ad_user_upp ad ON t1.itcode = LOWER(ad.user_name)")

    // Aggregate all asset names belonging to one itcode into a single
    // comma-separated string. null2Str is applied to each asset name BEFORE
    // reduceByKey so a null assetname cannot be concatenated into the middle
    // of the list as the literal text "null" (the original code only
    // normalized after aggregation, which missed that case).
    val asset_rdd = asset_df.rdd
      .filter(row => row(0) != null && !"NULL".equals(row(0)))
      .map(row => (util.null2Str(row(0)), util.null2Str(row(1))))
      .reduceByKey(_ + "," + _)

    // Build one HBase Put per user: rowkey = itcode, column = nature:asset_name.
    val result = asset_rdd.map { case (itcode, assets) =>
      val put = new Put(Bytes.toBytes(itcode))
      put.addColumn(Bytes.toBytes("nature"), Bytes.toBytes("asset_name"), Bytes.toBytes(assets))
      (new ImmutableBytesWritable, put)
    }

    // Ensure the Spark session is stopped even if the HBase write fails,
    // so the yarn application does not linger.
    try {
      result.saveAsHadoopDataset(jobConf)
    } finally {
      sparkSession.stop()
    }
  }
}
