package com.lenovo.userprofile



import com.lenovo.function.Utils
import com.lenovo.jdbc.DBHelper
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.sql.SparkSession

object ETL_behavior_week_5 {

  /**
   * Weekly user-profile ETL: derives each user's dominant ITSM assigned group.
   *
   * Pipeline:
   *   1. Read the last 180 days of incidents from Hive (`ccsd.itsm_dw_incident`),
   *      excluding monitor-generated and "Customer Voice" records and keeping a
   *      whitelist of product_categorization_tier_1 values.
   *   2. Join the reporter's email prefix (text before '@') against the AD user
   *      table `ccsd.ad_user_upp` to attach a country.
   *   3. Count incidents per (email, assigned_group) pair and keep, for each
   *      email, the group with the highest count (row_number over count desc).
   *   4. Write one HBase Put per user into `upp:upp_user_profile`,
   *      column family "behavior".
   *
   * Side effects only: reads Hive, writes HBase; no return value.
   */
  def main(args: Array[String]): Unit = {

    val util = new Utils
    // appName now matches the object name (was mislabelled "ETL_behavior_week_4").
    val sparkSession = SparkSession.builder
      .master("yarn")
      .appName("ETL_behavior_week_5")
      .enableHiveSupport()
      .getOrCreate()

    // HBase sink via the legacy mapred API: TableOutputFormat consumes
    // (ImmutableBytesWritable, Put) pairs from saveAsHadoopDataset.
    val hbase_conf = HBaseConfiguration.create()
    val tablename = "upp:upp_user_profile"
    val jobConf = new JobConf(hbase_conf)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, tablename)

    // Incidents from the last 180 days, joined to AD on the email prefix.
    // NOTE: the previous version embedded a stray trailing space inside the
    // date literal passed to date_sub ("'<day> '"), which can make the
    // string-to-date cast yield NULL and silently filter out every row.
    sparkSession.sql(
      "select LOWER(substring_index(t1.email,'@',1)) email,t1.assigned_group assigned_group, ad.country " +
        "from (select  LOWER(dw.internetemail) email ,dw.assigned_group assigned_group " +
        "from ccsd.itsm_dw_incident dw " +
        "where LOWER(internetemail) not like '%monitor%' " +
        "AND reported_sourcename !='Monitor' " +
        "AND categorization_tier_1 !='Customer Voice' " +
        "AND ( product_categorization_tier_1 =='Applications - Strategic' " +
        "OR product_categorization_tier_1 =='Applications - Non Strategic China' " +
        "OR product_categorization_tier_1 =='Applications - Manufactory' " +
        "OR product_categorization_tier_1 =='Communication & Collaboration' " +
        "OR product_categorization_tier_1 =='DC Operation' " +
        "OR product_categorization_tier_1 =='Facility' " +
        "OR product_categorization_tier_1 =='Network' " +
        "OR product_categorization_tier_1 =='Security' " +
        "OR product_categorization_tier_1 =='Server' " +
        "OR product_categorization_tier_1 =='Storage' " +
        "OR product_categorization_tier_1 =='Voice' " +
        "OR product_categorization_tier_1 =='Desktop & Office Software' " +
        "OR product_categorization_tier_1 =='Hardware Maintenance' " +
        "OR product_categorization_tier_1 =='Infrasturcture Application' " +
        "OR product_categorization_tier_1 =='Others' ) " +
        "AND internetemail is not null AND internetemail!='' AND LOWER(internetemail)!= 'null'  " +
        "AND date_format(dw.submitdate,'yyyy-MM-dd') > date_sub('" + util.getDay() + "','180') ) t1  " +
        "join ccsd.ad_user_upp ad on LOWER(substring_index(t1.email,'@',1)) = LOWER(ad.user_name)")
      .createTempView("all_data")

    import sparkSession.implicits._

    // Count incidents per (email, assigned_group). A tuple key is used instead
    // of the old "email#group" string concatenation so that a '#' inside an
    // assigned_group value can no longer corrupt the split-back step.
    // (The view/column name "time_interval" is historical; it actually holds
    // the assigned group — kept to avoid breaking downstream SQL.)
    sparkSession.sql("select email,assigned_group from all_data").rdd
      .map(item => ((item(0) + "", item(1) + ""), 1))
      .reduceByKey(_ + _)
      .map { case ((email, group), count) => (email, group, count) }
      .toDF("email", "time_interval", "num")
      .createTempView("day_time_interval_tmp")

    // Keep, for each email, the single most frequent assigned group.
    val day_time_interval_resultDF = sparkSession.sql(
      "SELECT a.email,a.time_interval,a.num FROM (select t.email email,t.time_interval time_interval,t.num num " +
        "from (select email,time_interval,num,row_number() over( partition by email order by CAST(num AS int) desc ) rn  " +
        "from day_time_interval_tmp) t where t.rn =1) a ")

    day_time_interval_resultDF.show()

    // One Put per user: rowkey = email prefix, behavior:assgind_group = group.
    // NOTE(review): qualifier "assgind_group" is misspelled but is the live
    // HBase column name — do not rename without migrating readers.
    day_time_interval_resultDF.rdd.map(row => {
      val put = new Put(Bytes.toBytes(row(0) + ""))
      put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("assgind_group"), Bytes.toBytes(row(1) + ""))
      (new ImmutableBytesWritable, put)
    }).saveAsHadoopDataset(jobConf)

    sparkSession.stop()
  }

}
