package com.lenovo.userprofile

import java.text.SimpleDateFormat
import java.util.{Calendar, GregorianCalendar, TimeZone}
import com.lenovo.function.Utils
import com.lenovo.jdbc.DBHelper
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{StringType, StructField, StructType}

object ETL_behavior_week_4 {
  /**
   * Weekly batch job: derives three per-user behavior tags from ITSM incident
   * tickets and writes them to the HBase table "upp:upp_user_profile",
   * column family "behavior":
   *   - CN_peak_day_user:  the intraday interval the user most often files tickets in
   *   - CN_peak_week_user: the weekday the user most often files tickets on
   *   - ticket_trend:      last-30-day ticket count vs. the user's 180-day average
   *
   * Inputs: Hive tables ccsd.itsm_dw_incident, ccsd.ad_user_upp, ccsd.hr_employee,
   * and the MySQL table tb_country_mapping (JDBC settings come from Utils).
   * Runs on YARN with Hive support enabled.
   */
  def main(args: Array[String]): Unit = {
    val util = new Utils
    val sparkSession = SparkSession.builder.master("yarn").appName("ETL_behavior_week_4").enableHiveSupport().getOrCreate()

    // HBase output: classic mapred TableOutputFormat targeting upp:upp_user_profile.
    // Zookeeper quorum/port come from the cluster's hbase-site.xml on the classpath.
    val hbase_conf = HBaseConfiguration.create()
    //hbase_conf.set("hbase.zookeeper.property.clientPort", "2181")
    val tablename = "upp:upp_user_profile"
    val jobConf = new JobConf(hbase_conf)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, tablename)

    //var all_df = sparkSession.sql("select LOWER(dw.internetemail),dw.submitdate from cctest.dw_incident dw where reported_sourcename !='Monitor' AND categorization_tier_1 !='Customer Voice' AND ( product_categorization_tier_1 =='Applications - Strategic' OR product_categorization_tier_1 =='Applications - Non Strategic China' OR product_categorization_tier_1 =='Applications - Manufactory' OR product_categorization_tier_1 =='Communication & Collaboration' OR product_categorization_tier_1 =='DC Operation' OR product_categorization_tier_1 =='Facility' OR product_categorization_tier_1 =='Network' OR product_categorization_tier_1 =='Security' OR product_categorization_tier_1 =='Server' OR product_categorization_tier_1 =='Storage' OR product_categorization_tier_1 =='Voice' OR product_categorization_tier_1 =='Desktop & Office Software' OR product_categorization_tier_1 =='Hardware Maintenance' OR product_categorization_tier_1 =='Infrasturcture Application' OR product_categorization_tier_1 =='Others' ) AND internetemail is not null AND internetemail!='' AND LOWER(internetemail)!= 'null' ")
    // Last-180-day incidents, restricted to IT product categories, monitor noise
    // filtered out; email local-part joined against AD users to keep known accounts.
    // ("Infrasturcture" is spelled this way in the source data — do not correct.)
    sparkSession.sql("select LOWER(substring_index(t1.email,'@',1)) email,t1.submitdate submitdate, ad.country from (select  LOWER(dw.internetemail) email ,dw.submitdate submitdate from ccsd.itsm_dw_incident dw where LOWER(internetemail) not like '%monitor%' AND reported_sourcename !='Monitor' AND categorization_tier_1 !='Customer Voice' AND ( product_categorization_tier_1 =='Applications - Strategic' OR product_categorization_tier_1 =='Applications - Non Strategic China' OR product_categorization_tier_1 =='Applications - Manufactory' OR product_categorization_tier_1 =='Communication & Collaboration' OR product_categorization_tier_1 =='DC Operation' OR product_categorization_tier_1 =='Facility' OR product_categorization_tier_1 =='Network' OR product_categorization_tier_1 =='Security' OR product_categorization_tier_1 =='Server' OR product_categorization_tier_1 =='Storage' OR product_categorization_tier_1 =='Voice' OR product_categorization_tier_1 =='Desktop & Office Software' OR product_categorization_tier_1 =='Hardware Maintenance' OR product_categorization_tier_1 =='Infrasturcture Application' OR product_categorization_tier_1 =='Others' ) AND internetemail is not null AND internetemail!='' AND LOWER(internetemail)!= 'null'  AND date_format(dw.submitdate,'yyyy-MM-dd') > date_sub('" + util.getDay() + " ','180') ) t1  join ccsd.ad_user_upp ad on LOWER(substring_index(t1.email,'@',1)) = LOWER(ad.user_name)")
      .createTempView("all_data")
    import sparkSession.sqlContext.implicits._
    // Per-user country: prefer the HR record's country, fall back to AD's
    // (Utils.filterNotNull picks the first non-null of the two).
    sparkSession.sql("select lower(ad.user_name),emp.country country1,ad.country country2 from ccsd.ad_user_upp ad left join ccsd.hr_employee emp on lower(ad.user_name) = lower(emp.itcode)").rdd.map(item =>{
      val country = util.filterNotNull(item(1)+"",item(2)+"")
      (item(0)+"",country)
    }).toDF("user_name","country").createTempView("all_country")

    sparkSession.sql("select t1.email email,t1.submitdate submitdate,t2.country country from all_data t1 join all_country t2 on  t1.email = t2.user_name ").createTempView("all")

    // MySQL mapping table: workday country name -> canonical/target country name.
    val df = sparkSession.read.format("jdbc").options(Map("url" -> util.url , "driver" -> util.driver, "dbtable" -> "tb_country_mapping", "user" -> util.user, "password" -> util.password)).load()
    df.show(10)
    df.createTempView("mysql_tmp")

    // NOTE(review): "traget_country" mirrors the actual MySQL column name (apparent
    // schema typo) — do not "fix" it here without changing the table.
    var all_df = sparkSession.sql("select t1.email email,t1.submitdate submitdate,t2.traget_country from all t1 join mysql_tmp t2 on lower(t1.country) = lower(t2.workday_country) ")
    all_df.show()

    // --- CN_peak_day_user: count tickets per (email, intraday interval). ---
    // NOTE(review): the country is hard-coded to "China" here even though each row
    // carries a country column — presumably intentional for the CN_ tag; confirm.
    val peak_with_day = DBHelper.getPeakWithinDay
    val day_time_interval_rdd = all_df.rdd.map(row => {
      (row(0)+"#"+get_day_time_interval(row(1)+"","China",peak_with_day),1)
    }).reduceByKey(_+_).map(row=> {Row(row._1.split("#")(0),row._1.split("#")(1),row._2.toString)})

    val day_schemaString = "email,time_interval,num"
    val day_schema = StructType (day_schemaString.split(",").map(fieldName => StructField(fieldName,StringType,true)))
    val  time_interval_SchemaRDD = sparkSession.createDataFrame(day_time_interval_rdd, day_schema)
    time_interval_SchemaRDD.show()
    time_interval_SchemaRDD.createTempView("day_time_interval_tmp")
    //val day_time_interval_resultDF = sparkSession.sql("select t.email,t.time_interval,t.num from (select email,time_interval,num,row_number() over( partition by email order by  CAST(num AS int) desc ) rn  from day_time_interval_tmp) t where t.rn <=1")
    //val week_time_interval_resultDF =sparkSession.sql("SELECT a.email,a.time_interval,a.num,b.num FROM (select t.email email,t.time_interval time_interval,t.num num from (select email,time_interval,num,row_number() over( partition by email order by CAST(num AS int) desc ) rn  from week_time_interval_tmp) t where t.rn =1) a join (select t.email email,t.time_interval time_interval,t.num num from (select email,time_interval,num,row_number() over( partition by email order by CAST(num AS int) desc ) rn  from week_time_interval_tmp) t where t.rn =2) b  on (a.email = b.email)")

    // Rank intervals per user; keep rank-1 plus rank-2's count (left join, so b.num
    // is null for users with a single interval) to detect ties.
    val day_time_interval_resultDF = sparkSession.sql("SELECT a.email,a.time_interval,a.num,b.num FROM (select t.email email,t.time_interval time_interval,t.num num from (select email,time_interval,num,row_number() over( partition by email order by CAST(num AS int) desc ) rn  from day_time_interval_tmp) t where t.rn =1) a left join (select t.email email,t.time_interval time_interval,t.num num from (select email,time_interval,num,row_number() over( partition by email order by  CAST(num AS int) desc ) rn  from day_time_interval_tmp) t where t.rn =2) b  on (a.email = b.email)")

    day_time_interval_resultDF.toDF().show()
    day_time_interval_resultDF.rdd.map(row=>{
      // Row key = email local-part; a tie between top-2 intervals writes "Average".
      // NOTE(review): row(2)/row(3) are stringified before the null check, so a null
      // rank-2 count becomes the string "null" and simply fails the equality — the
      // trailing `row(2)!= null` is effectively redundant but harmless.
      val put = new Put(Bytes.toBytes(row(0)+""))
      if ( (row(2)+"") == (row(3)+"") && !("").equals(row(2))&& row(2)!= null){
        println("num1 : " + row(2) +" num2 : "+ row(3))
        put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("CN_peak_day_user"), Bytes.toBytes("Average"))
      }
      else
        put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("CN_peak_day_user"), Bytes.toBytes(row(1).toString))
      (new ImmutableBytesWritable, put)
    }).saveAsHadoopDataset(jobConf)


    // --- CN_peak_week_user: same top-2-with-tie scheme, keyed by weekday. ---
    val week_time_interval_rdd = all_df.rdd.map(row => {
      if ((row(1)+"").length > 0)
        (row(0).toString+"#"+get_week_time_interval(row(1)+"","China"),1)
      else
        ("",0)}).filter(_._2 != 0).reduceByKey(_+_).map(row=> {Row(row._1.split("#")(0),row._1.split("#")(1),row._2.toString)})

    val week_schemaString = "email,time_interval,num"
    val week_schema = StructType (week_schemaString.split(",").map(fieldName => StructField(fieldName,StringType,true)))
    val  week_time_interval_SchemaRDD = sparkSession.createDataFrame(week_time_interval_rdd, week_schema)
    week_time_interval_SchemaRDD.show()
    week_time_interval_SchemaRDD.createTempView("week_time_interval_tmp")
    // val week_time_interval_resultDF = sparkSession.sql("select t.email,t.time_interval,t.num from (select email,time_interval,num,row_number() over( partition by email order by  CAST(num AS int) desc ) rn  from week_time_interval_tmp) t where t.rn <=1")
    val week_time_interval_resultDF = sparkSession.sql("SELECT a.email,a.time_interval,a.num,b.num FROM (select t.email email,t.time_interval time_interval,t.num num from (select email,time_interval,num,row_number() over( partition by email order by CAST(num AS int) desc ) rn  from week_time_interval_tmp) t where t.rn =1) a left join (select t.email email,t.time_interval time_interval,t.num num from  (select email,time_interval,num,row_number() over( partition by email order by CAST(num AS int) desc ) rn  from week_time_interval_tmp) t where t.rn =2) b  on (a.email = b.email)")
    //val week_time_interval_resultDF = sparkSession.sql("select t1.email,t1.time_interval,t1.num,ad.country from  (select t.email email ,t.time_interval time_interval,t.num num from  (select email,time_interval,num,row_number() over( partition by email order by num desc ) rn from week_time_interval_tmp) t where t.rn <=1) t1 join ad_user ad on LOWER(ad.user_name) = t1.email")

    week_time_interval_resultDF.rdd.map(row=>{
      val put = new Put(Bytes.toBytes(row(0)+""))
      //peak_week_user
      if ((row(2)+"") == (row(3)+"") && !("").equals(row(2))&& row(2)!= null ){
        println("num1 : " + row(2) +" num2 : "+ row(3))
        put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("CN_peak_week_user"), Bytes.toBytes("Average"))
      }
      else
        put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("CN_peak_week_user"), Bytes.toBytes(row(1).toString))
      (new ImmutableBytesWritable, put)
    }).saveAsHadoopDataset(jobConf)

    //ticket_trend
    // Same 180-day query as above (window '180') vs. a 30-day variant ('30').
    val all_month_df = sparkSession.sql("select LOWER(substring_index(t1.email,'@',1)) email,t1.submitdate submitdate, ad.country from (select  LOWER(dw.internetemail) email ,dw.submitdate submitdate from ccsd.itsm_dw_incident dw where LOWER(internetemail) not like '%monitor%' AND reported_sourcename !='Monitor' AND categorization_tier_1 !='Customer Voice' AND ( product_categorization_tier_1 =='Applications - Strategic' OR product_categorization_tier_1 =='Applications - Non Strategic China' OR product_categorization_tier_1 =='Applications - Manufactory' OR product_categorization_tier_1 =='Communication & Collaboration' OR product_categorization_tier_1 =='DC Operation' OR product_categorization_tier_1 =='Facility' OR product_categorization_tier_1 =='Network' OR product_categorization_tier_1 =='Security' OR product_categorization_tier_1 =='Server' OR product_categorization_tier_1 =='Storage' OR product_categorization_tier_1 =='Voice' OR product_categorization_tier_1 =='Desktop & Office Software' OR product_categorization_tier_1 =='Hardware Maintenance' OR product_categorization_tier_1 =='Infrasturcture Application' OR product_categorization_tier_1 =='Others' ) AND internetemail is not null AND internetemail!='' AND LOWER(internetemail)!= 'null'  AND date_format(dw.submitdate,'yyyy-MM-dd') > date_sub('" + util.getDay() + " ','180') ) t1  join ccsd.ad_user_upp ad on LOWER(substring_index(t1.email,'@',1)) = LOWER(ad.user_name)")
    val one_month_df = sparkSession.sql("select LOWER(substring_index(t1.email,'@',1)) email,t1.submitdate submitdate, ad.country from (select  LOWER(dw.internetemail) email ,dw.submitdate submitdate from ccsd.itsm_dw_incident dw where LOWER(internetemail) not like '%monitor%' AND reported_sourcename !='Monitor' AND categorization_tier_1 !='Customer Voice' AND ( product_categorization_tier_1 =='Applications - Strategic' OR product_categorization_tier_1 =='Applications - Non Strategic China' OR product_categorization_tier_1 =='Applications - Manufactory' OR product_categorization_tier_1 =='Communication & Collaboration' OR product_categorization_tier_1 =='DC Operation' OR product_categorization_tier_1 =='Facility' OR product_categorization_tier_1 =='Network' OR product_categorization_tier_1 =='Security' OR product_categorization_tier_1 =='Server' OR product_categorization_tier_1 =='Storage' OR product_categorization_tier_1 =='Voice' OR product_categorization_tier_1 =='Desktop & Office Software' OR product_categorization_tier_1 =='Hardware Maintenance' OR product_categorization_tier_1 =='Infrasturcture Application' OR product_categorization_tier_1 =='Others' ) AND internetemail is not null AND internetemail!='' AND LOWER(internetemail)!= 'null'  AND date_format(dw.submitdate,'yyyy-MM-dd') > date_sub('" + util.getDay() + " ','30') ) t1  join ccsd.ad_user_upp ad on LOWER(substring_index(t1.email,'@',1)) = LOWER(ad.user_name)")
    // avg = total 180-day tickets / number of distinct users with tickets
    // (i.e. mean tickets-per-user over the half year).
    val avg = ( all_month_df.rdd.map(row => {(row(0),1)}).count().toDouble) / (all_month_df.rdd.map(row => {(row(0),1)}).reduceByKey(_+_).count()).toDouble
    print("avg : " + avg)
    // Tag each user's last-30-day count as Increase/Decrease/Average vs. that mean.
    one_month_df.rdd.map(row => {(row(0),1)}).reduceByKey(_+_).map(row=>{
      val put = new Put(Bytes.toBytes(row._1+""))
      //ticket_trend
      println("name : " + row._1+" value : "+row._2)
      if (row._2.toString.toDouble > avg ){
        put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("ticket_trend"), Bytes.toBytes("Increase"))
      } else if (row._2.toDouble < avg ){
        put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("ticket_trend"), Bytes.toBytes("Decrease"))
      } else
        put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("ticket_trend"), Bytes.toBytes("Average"))
      (new ImmutableBytesWritable, put)
    }).saveAsHadoopDataset(jobConf)


    sparkSession.stop()

  }

  /**
   * Maps a timestamp to a named intraday interval using the configured peak
   * boundaries.
   *
   * @param time          timestamp in "yyyy-MM-dd HH:mm:ss" form; null/empty/"null"
   *                      yields the literal string "NULL"
   * @param country       country name, resolved to a time zone id via Utils.get_locale;
   *                      the hour is read in that zone (parsing itself uses the JVM
   *                      default zone, as before)
   * @param peak_with_day five '-'-separated "HH:mm" boundaries delimiting
   *                      Morning / Noon / Afternoon / "Before off work"; hours
   *                      outside [first, last) are "Overtime"
   * @return the interval label for the timestamp's local hour
   */
  def get_day_time_interval(time :String,country:String,peak_with_day:String):String={
    if(time==null || "".equals(time) || "null".equals(time.toLowerCase)) return  "NULL"

    val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
    val util = new Utils

    val c1 = new GregorianCalendar
    c1.setTime(sdf.parse(time))
    c1.setTimeZone(TimeZone.getTimeZone(util.get_locale(country)))
    val hour = c1.get(Calendar.HOUR_OF_DAY)

    // Parse the five boundary hours once, instead of re-splitting the spec
    // string for every comparison as the old chain of returns did.
    val bounds = peak_with_day.split("-").map(_.split(":")(0).toInt)

    if (bounds(0) <= hour && hour < bounds(1)) "Morning"
    else if (bounds(1) <= hour && hour < bounds(2)) "Noon"
    else if (bounds(2) <= hour && hour < bounds(3)) "Afternoon"
    else if (bounds(3) <= hour && hour < bounds(4)) "Before off work"
    else "Overtime"
  }
  /**
   * Maps a timestamp to the weekday it falls on in the given country's time zone:
   * "Monday".."Friday", or "Weekend" for Saturday/Sunday.
   *
   * @param time    timestamp in "yyyy-MM-dd HH:mm:ss" form; null/empty/"null"
   *                yields the literal string "NULL"
   * @param country country name, resolved to a time zone id via Utils.get_locale
   * @return weekday label for the timestamp's local date
   */
  def get_week_time_interval(time :String,country:String):String={
    if(time==null || "".equals(time) || "null".equals(time.toLowerCase)) return  "NULL"
    val formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
    val helper = new Utils

    val calendar = new GregorianCalendar
    calendar.setTime(formatter.parse(time))
    // Read the weekday in the country's local zone (parse uses the JVM default zone).
    calendar.setTimeZone(TimeZone.getTimeZone(helper.get_locale(country)))

    // Calendar constants: SUNDAY=1 .. SATURDAY=7, so MONDAY..FRIDAY are 2..6.
    calendar.get(Calendar.DAY_OF_WEEK) match {
      case Calendar.MONDAY                     => "Monday"
      case Calendar.TUESDAY                    => "Tuesday"
      case Calendar.WEDNESDAY                  => "Wednesday"
      case Calendar.THURSDAY                   => "Thursday"
      case Calendar.FRIDAY                     => "Friday"
      case Calendar.SATURDAY | Calendar.SUNDAY => "Weekend"
      case other                               => other.toString
    }
  }
}