package com.lenovo.userprofile


import java.sql.DriverManager
import java.text.SimpleDateFormat
import java.util.{Calendar, Date}

import com.lenovo.function.Utils
import com.lenovo.jdbc.DBHelper
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.sql.{Row, SaveMode, SparkSession}

/**
 * Weekly "new user" ETL.
 *
 * Reads AD users (ccsd.ad_user_upp) left-joined to HR employees (ccsd.hr_employee)
 * from Hive, derives an "is_new_user" value per itcode via Utils.getWorkingMonth
 * against the threshold from DBHelper.getNewUserConf, and writes it to the HBase
 * table upp:upp_user_profile under column family "nature".
 *
 * Row key: lower-cased user_name. Side effects only; no value is returned.
 */
object ETL_nature_week_1 {
  def main(args: Array[String]): Unit = {
    val util = new Utils  // val: never reassigned
    val sparkSession = SparkSession.builder.master("yarn").appName("ETL_nature_week_1").enableHiveSupport().getOrCreate()
    val hbaseConf = HBaseConfiguration.create()
    val tablename = "upp:upp_user_profile"
    val jobConf = new JobConf(hbaseConf)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, tablename)

    // BUG FIX: the hire_date pattern used 'YYYY' (week-based year), which renders the
    // wrong year for dates in the last days of December / first days of January.
    // 'yyyy' is the calendar year — consistent with the when_created expression below.
    // Rows with null/empty/'null' user_name are excluded here and re-checked after
    // the join in the RDD filter.
    val newUserDf = sparkSession.sql("select LOWER(ad.user_name),date_format(emp.hire_date ,'yyyy-MM-dd') hire_date,from_unixtime(unix_timestamp(ad.when_created,'MM/dd/yyyy HH:mm'),'yyyy-MM-dd') when_created from ccsd.ad_user_upp ad left join ccsd.hr_employee emp on LOWER(emp.itcode)= LOWER(ad.user_name) where  ad.user_name is not null and ad.user_name !='' and lower(ad.user_name)!='null' ")

    // Threshold/config for what counts as a "new" user — TODO confirm exact semantics
    // against DBHelper.getNewUserConf; only passed through to Utils.getWorkingMonth here.
    val newUserConf = DBHelper.getNewUserConf
    println("***************")
    println(newUserConf)
    println("***************")

    // Defensive re-filter (the SQL already excludes most bad keys), then one Put per user.
    newUserDf.rdd
      .filter { row =>
        row != null && row.length > 0 && row(0) != null &&
          !"null".equals((row(0) + "").toLowerCase) && !"".equals(row(0) + "")
      }
      .map { col =>
        // Row key: lower-cased itcode/user_name.
        val put = new Put(Bytes.toBytes(col(0).toString.toLowerCase))
        // filterNotNull picks when_created (col 2) over hire_date (col 1) when present —
        // presumably AD creation date takes precedence; verify against Utils.filterNotNull.
        put.addColumn(
          Bytes.toBytes("nature"),
          Bytes.toBytes("is_new_user"),
          Bytes.toBytes(util.getWorkingMonth(util.filterNotNull(col(2) + "", col(1) + ""), newUserConf)))
        (new ImmutableBytesWritable, put)
      }
      .saveAsHadoopDataset(jobConf)  // returns Unit — no need to bind it to a name

    sparkSession.stop()
  }
}
