package com.lenovo.userprofile

import java.text.SimpleDateFormat
import java.util.Date

import com.lenovo.function.Utils
import com.lenovo.jdbc.DBHelper
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.sql.SparkSession

/**
 * Monthly "nature" ETL: flags users whose job title changed within a configured
 * look-back window and marks them in HBase (`upp:upp_user_profile`) with
 * column `nature:title_change = "Y"`.
 *
 * Two sources are scanned:
 *  1. AD users (`ccsd.ad_user_upp`) against their history (`ccsd.ad_user_history_tmp`).
 *  2. Active HR employees (`ccsd.hr_employee`, joined to AD) against
 *     `ccsd.hr_employee_history_tmp`.
 */
object ETL_nature_mon_4 {

  def main(args: Array[String]): Unit = {

    // Never reassigned — val, not var. Only used for the current-day string below.
    val util = new Utils

    val sparkSession = SparkSession.builder
      .master("yarn")
      .appName("ETL_nature_mon_4")
      .enableHiveSupport()
      .getOrCreate()

    // HBase output configuration: all flagged users are written to this table.
    val hbaseConf = HBaseConfiguration.create()
    val tablename = "upp:upp_user_profile"
    val jobConf = new JobConf(hbaseConf)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, tablename)

    // Look-back window in days: configured value (presumably months — confirm
    // against DBHelper.getTitleChangeConf) times 30. Never reassigned — val.
    val days = DBHelper.getTitleChangeConf * 30
    val today = util.getDay()

    // AD users whose current title differs from any historical record inside the window.
    // Interpolated triple-quoted SQL replaces the original string concatenation;
    // the statement text is semantically identical.
    val all_df_ad = sparkSession.sql(
      s"""select distinct t.user_name, t.job_title job_title, had.job_title his_job_title
         |from (select lower(ad.user_name) user_name, ad.job_title job_title
         |      from ccsd.ad_user_upp ad
         |      where ad.user_name is not null and ad.user_name != '' and lower(user_name) != 'null') t
         |join ccsd.ad_user_history_tmp had on t.user_name = lower(had.user_name)
         |where t.job_title != had.job_title
         |  and date_format(had.record_date,'yyyy-MM-dd') > date_sub('$today', $days)""".stripMargin)

    writeTitleChangeFlag(all_df_ad, jobConf)

    // Active HR employees (must also exist in AD) whose title changed inside the same window.
    val all_df_emp = sparkSession.sql(
      s"""select distinct t.itcode, t.job_title job_title, hemp.job_title his_job_title
         |from (select lower(emp.itcode) itcode, emp.job_title job_title
         |      from ccsd.hr_employee emp
         |      join ccsd.ad_user_upp ad on lower(ad.user_name) = lower(emp.itcode)
         |      where emp.itcode is not null and emp.itcode != '' and lower(emp.itcode) != 'null'
         |        and lower(emp.currently_active) = 'yes') t
         |join ccsd.hr_employee_history_tmp hemp on t.itcode = lower(hemp.itcode)
         |where t.job_title != hemp.job_title
         |  and date_format(hemp.record_date,'yyyy-MM-dd') > date_sub('$today', $days)""".stripMargin)

    writeTitleChangeFlag(all_df_emp, jobConf)

    sparkSession.stop()
  }

  /**
   * Writes each row of `df` into HBase: the first column (lower-cased user id)
   * becomes the row key, and `nature:title_change` is set to "Y".
   * Extracted because the original duplicated this map/save block verbatim
   * for both DataFrames.
   */
  private def writeTitleChangeFlag(df: org.apache.spark.sql.DataFrame, jobConf: JobConf): Unit = {
    df.rdd.map { row =>
      // row(0) may be any type; "+ \"\"" reproduces the original null-safe toString.
      val put = new Put(Bytes.toBytes(row(0) + ""))
      put.addColumn(Bytes.toBytes("nature"), Bytes.toBytes("title_change"), Bytes.toBytes("Y"))
      (new ImmutableBytesWritable, put)
    }.saveAsHadoopDataset(jobConf)
  }

}
