package com.lenovo.userprofile

import com.lenovo.function.Utils
import com.lenovo.jdbc.DBHelper
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.sql.SparkSession

/**
  * Builds the "nature" base-label user profile: joins AD user and HR employee
  * data from Hive, enriches it with country/city/band/manager lookups, and
  * writes one HBase row per user (row key = lower-cased itcode).
  */
object ETL_nature_mon_1 {

  /**
    * Entry point. Reads AD/HR user data from Hive, enriches each user with
    * country, city, band and manager lookups, and writes one row per user
    * into the HBase table named by `Utils.tablename`.
    */
  def main(args: Array[String]): Unit = {
    val util = new Utils
    val sparkSession = SparkSession.builder.master("yarn").appName("ETL_nature_mon_1").enableHiveSupport().getOrCreate()

    // HBase output job configuration; the target table name comes from Utils.
    val hbase_conf = HBaseConfiguration.create()
    val jobConf = new JobConf(hbase_conf)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, util.tablename)

    // Lookup maps arrive from the DB as "key#value" strings; split each item
    // exactly once (the original re-split per tuple element).
    val country_map: Map[String, String] =
      DBHelper.get_country_map().toArray().map { item =>
        val parts = item.toString.split("#")
        parts(0) -> parts(1)
      }.toMap

    val city_map: Map[String, String] =
      DBHelper.get_city_map().toArray().map { item =>
        val parts = item.toString.split("#")
        parts(0) -> parts(1)
      }.toMap

    // All AD users (left-joined to HR employee data) with a usable itcode.
    // NOTE: hire_date pattern fixed from 'YYYY-MM-dd' to 'yyyy-MM-dd' —
    // 'YYYY' is the week-based year and yields the wrong year for dates in
    // the last/first days of a year.
    val all_user_df = sparkSession.sql("select LOWER(ad.user_name) itcode,ad.employee_type type,date_format(emp.hire_date ,'yyyy-MM-dd') hire_date,from_unixtime(unix_timestamp(ad.when_created,'MM/dd/yyyy HH:mm'),'yyyy-MM-dd') when_created,emp.country country1,ad.country country2,emp.city city1,ad.city city2,emp.manager_id manager_id ,emp.employee_id employee_id,emp.position_worker_type position_worker_type from ccsd.ad_user_upp ad left join ccsd.hr_employee emp on LOWER(emp.itcode)= LOWER(ad.user_name) where  ad.user_name is not null and ad.user_name !='' and lower(ad.user_name)!='null' ")

    val user_df = all_user_df.select("itcode", "type", "hire_date", "country1", "country2", "city1", "city2", "manager_id", "employee_id", "position_worker_type", "when_created")

    // Known manager ids: both the AD "manager" column and the HR manager_id.
    val emp_manager_df = sparkSession.sql("select ad.manager,emp.manager_id from ccsd.ad_user_upp ad left join ccsd.hr_employee emp on LOWER(emp.itcode)= LOWER(ad.user_name) where emp.manager_id is not null and emp.manager_id !='' and lower(emp.manager_id)!='null'")
    emp_manager_df.show()
    // Collect once instead of running two separate Spark jobs over the same RDD.
    val managerRows = emp_manager_df.rdd.map(row => (row(0) + "", row(1) + "")).collect()
    val mananger_list = managerRows.map(_._1).toList ::: managerRows.map(_._2).toList

    // itcode (lower-cased by the query) -> band. Rows whose "#"-join does not
    // split back into exactly two fields are skipped, as before (e.g. a band
    // value that itself contains '#').
    val user_band: Map[String, String] =
      sparkSession.sql("select LOWER(itcode),band from ccsd.hr_band where itcode is not null and itcode !='' and lower(itcode) !='null' and band is not null and band !='' and lower(band) !='null'")
        .rdd.map(item => item(0) + "#" + item(1))
        .collect()
        .flatMap { item =>
          val parts = item.split("#")
          if (parts.length == 2) Some(parts(0) -> parts(1)) else None
        }.toMap

    // One HBase Put per user row; row key is the lower-cased itcode.
    // Column index map (from the select above): 0 itcode, 1 type, 2 hire_date,
    // 3 country1, 4 country2, 5 city1, 6 city2, 7 manager_id, 8 employee_id,
    // 9 position_worker_type, 10 when_created.
    val rdd = user_df.rdd.map { col =>
      val put = new Put(Bytes.toBytes(util.null2Str(col(0)).toLowerCase()))
      // user name
      put.addColumn(Bytes.toBytes("nature"), Bytes.toBytes("user_name"), Bytes.toBytes(util.null2Str(col(0))))
      // title_change defaults to "N"; NOTE(review): presumably overwritten by a later job — confirm
      put.addColumn(Bytes.toBytes("nature"), Bytes.toBytes("title_change"), Bytes.toBytes("N"))
      // employee type: position_worker_type first, falling back to the AD type
      put.addColumn(Bytes.toBytes("nature"), Bytes.toBytes("employee_type"), Bytes.toBytes(util.filterNotNull(util.null2Str(col(9)), util.null2Str(col(1)))))
      // working years derived from when_created, falling back to hire_date
      put.addColumn(Bytes.toBytes("nature"), Bytes.toBytes("working_years"), Bytes.toBytes(util.getWorkingYears(util.filterNotNull(col(10) + "", col(2) + ""))))
      // country, normalised through the country lookup map ("NULL" when unmapped)
      put.addColumn(Bytes.toBytes("nature"), Bytes.toBytes("country"), Bytes.toBytes(get_target_country(util.filterNotNull(col(3) + "", util.null2Str(col(4))), country_map)))
      // city, normalised through the city lookup map ("NULL" when unmapped)
      put.addColumn(Bytes.toBytes("nature"), Bytes.toBytes("city"), Bytes.toBytes(get_target_city(util.filterNotNull(col(5) + "", util.null2Str(col(6))), city_map)))
      // manager flag: employee_id checked against the manager list plus the user's band
      put.addColumn(Bytes.toBytes("nature"), Bytes.toBytes("manager"), Bytes.toBytes(util.isManager(util.null2Str(col(8)), mananger_list, take_band(user_band, col(0) + ""))))
      // behaviour flags default to "N"
      put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("ticket_sr"), Bytes.toBytes("N"))
      put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("ticket_qi"), Bytes.toBytes("N"))
      (new ImmutableBytesWritable, put)
    }
    rdd.saveAsHadoopDataset(jobConf)
    sparkSession.stop()
  }

  /** Band for an itcode, or "NULL" when the itcode is unknown. */
  def take_band(all: Map[String, String], itcode: String): String =
    all.getOrElse(itcode, "NULL")

  /** Normalised country name from the lookup map, or "NULL" when unmapped. */
  def get_target_country(country: String, country_map: Map[String, String]): String =
    country_map.getOrElse(country, "NULL")

  /** Normalised city name from the lookup map, or "NULL" when unmapped. */
  def get_target_city(city: String, city_map: Map[String, String]): String =
    city_map.getOrElse(city, "NULL")

}
