package com.lenovo.userprofile

import java.text.SimpleDateFormat
import java.util.Date
import com.lenovo.function.Utils
import com.lenovo.jdbc.DBHelper
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.sql.SparkSession

  object ETL_attitude_weel_1 {

  /**
   * Weekly attitude ETL: for each user, computes how recently they raised an
   * urgency (MIM incident), a compliment, or a complaint, converts that day
   * difference into a recency bucket label, and writes it to the HBase table
   * `upp:upp_user_profile` under column family "attitude".
   */
  def main(args: Array[String]): Unit = {
    val util = new Utils
    val sparkSession = SparkSession.builder.master("yarn").appName("ETL_attitude_weel_1").enableHiveSupport().getOrCreate()

    // HBase output configuration: every RDD below is saved into the same table.
    val hbase_conf = HBaseConfiguration.create()
    val tablename = "upp:upp_user_profile"
    val jobConf = new JobConf(hbase_conf)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, tablename)

    // Lookback windows in days (configured in weeks, hence *7). They are only
    // consumed by the commented-out date-filtered query variants below; the
    // DBHelper reads are kept so configuration access stays unchanged.
    val days_urgency = DBHelper.getUrgencyConf*7
    val days_compliment = DBHelper.getComplimentConf*7
    val days_complaint = DBHelper.getComplaintConf*7

    // Urgency: smallest day-difference between today and the user's most recent
    // reported MIM incident, keyed by the e-mail local part.
    //val all_df_urgency = sparkSession.sql("SELECT LOWER(substring_index(t.`e-mail`,'_',1)),t.`reported_date` FROM `ccsd`.`mim_incident` t join ccsd.ad_user_upp ad on LOWER(substring_index(t.`e-mail`,'_',1)) = lower(ad.user_name)  WHERE from_unixtime(unix_timestamp(t.`reported_date`,'yyyy/MM/dd HH:mm'),'yyyy-MM-dd') > date_sub('"+util.getDay()+"',"+days_urgency+")")
    val all_df_urgency = sparkSession.sql("SELECT LOWER(substring_index(t.`e-mail`,'_',1)),min(datediff(\""+util.getDay()+"\" ,cast(from_unixtime(unix_timestamp(t.`reported_date`,'yyyy/MM/dd HH:mm'),'yyyy-MM-dd') as string ) ) ) FROM `ccsd`.`mim_incident` t join ccsd.ad_user_upp ad on LOWER(substring_index(t.`e-mail`,'_',1)) = lower(ad.user_name) group by (LOWER(substring_index(t.`e-mail`,'_',1)))")
    all_df_urgency.show(50)

    all_df_urgency.rdd.map(item=>{
      // Row key: user name; column attitude:urgency = recency bucket label.
      val put = new Put(Bytes.toBytes(item(0)+""))
      put.addColumn(Bytes.toBytes("attitude"), Bytes.toBytes("urgency"), Bytes.toBytes(urgency_covertime(item(1)+"")))
      (new ImmutableBytesWritable, put)
    }).saveAsHadoopDataset(jobConf)

    // Compliment: smallest day-difference for "customer voice / compliment"
    // tickets, joined back to the referenced incident to recover the user.
    //val all_df_compliment = sparkSession.sql("select t1.email from (SELECT LOWER(substring_index(t2.internetemail,'@',1)) email FROM ccsd.itsm_dw_incident t1 join ccsd.itsm_dw_incident t2 on substr(t1.description,instr(t1.description,'INC'),15) = t2.incidentid where lower(t1.categorization_tier_1) = 'customer voice' AND lower(t1.categorization_tier_2) ='compliment'  AND t1.description like '%INC%' and date_format(t1.submitdate,'yyyy-MM-dd') > date_sub('"+util.getDay()+"',"+days_compliment+") ) t1 join ccsd.ad_user_upp ad on t1.email = LOWER(ad.user_name)")
    val all_df_compliment = sparkSession.sql("select t3.email,t3.time from (SELECT LOWER(substring_index(t2.internetemail,'@',1)) email,min( datediff(\""+util.getDay()+"\" ,cast(date_format(t1.submitdate,'yyyy-MM-dd')  as string) ) )time FROM ccsd.itsm_dw_incident t1 join ccsd.itsm_dw_incident t2 on substr(t1.description,instr(t1.description,'INC'),15) = t2.incidentid where lower(t1.categorization_tier_1) = 'customer voice' AND lower(t1.categorization_tier_2) ='compliment'  AND t1.description like '%INC%' GROUP BY(LOWER(substring_index(t2.internetemail,'@',1)))) t3 join ccsd.ad_user_upp ad on t3.email = LOWER(ad.user_name) ")
    all_df_compliment.show(50)

    all_df_compliment.rdd.map(item=>{
      val put = new Put(Bytes.toBytes(item(0)+""))
      put.addColumn(Bytes.toBytes("attitude"), Bytes.toBytes("praise"), Bytes.toBytes(urgency_covertime(item(1)+"")))
      (new ImmutableBytesWritable, put)
    }).saveAsHadoopDataset(jobConf)

    // Complaint: pick the most recent occurrence, then bucket by the day
    // difference (e.g. if both "last three months" and "last year" apply,
    // the closer window — last three months — wins via MIN).
    //val all_df_complaint = sparkSession.sql("select t1.email from (SELECT LOWER(substring_index(t2.internetemail,'@',1)) email FROM ccsd.itsm_dw_incident t1 join ccsd.itsm_dw_incident t2 on substr(t1.description,instr(t1.description,'INC'),15) = t2.incidentid where lower(t1.categorization_tier_1) = 'customer voice' AND lower(t1.categorization_tier_2) ='complaint'  AND t1.description like '%INC%' and date_format(t1.submitdate,'yyyy-MM-dd') > date_sub('"+util.getDay()+"',"+days_complaint+") ) t1 join ccsd.ad_user_upp ad on t1.email = LOWER(ad.user_name)")
    val all_df_complaint = sparkSession.sql("select t3.email,t3.time from (SELECT LOWER(substring_index(t2.internetemail,'@',1)) email,MIN(datediff(\""+util.getDay()+"\",cast(date_format(t1.submitdate,'yyyy-MM-dd') as string) ) )time FROM ccsd.itsm_dw_incident t1 join ccsd.itsm_dw_incident t2 on substr(t1.description,instr(t1.description,'INC'),15) = t2.incidentid where lower(t1.categorization_tier_1) = 'customer voice' AND lower(t1.categorization_tier_2) ='complaint'  AND t1.description like '%INC%' GROUP BY(LOWER(substring_index(t2.internetemail,'@',1)))) t3 join ccsd.ad_user_upp ad on t3.email = LOWER(ad.user_name) ")
    all_df_complaint.show(50)
    all_df_complaint.rdd.map(item=>{
      val put = new Put(Bytes.toBytes(item(0)+""))
      put.addColumn(Bytes.toBytes("attitude"), Bytes.toBytes("complaint"), Bytes.toBytes(urgency_covertime(item(1)+"")))
      (new ImmutableBytesWritable, put)
    }).saveAsHadoopDataset(jobConf)

    sparkSession.stop()

  }

    /**
     * Maps a day-count difference (as produced by Hive `datediff`) to a
     * recency bucket label.
     *
     * @param time_diff_data day difference rendered as a string; may be null,
     *                       empty, the literal "null", or non-numeric
     * @return "In Last One Month" (1..30), "In Last Three Month" (31..90),
     *         "In Last Six Month" (91..180), "In Last Year" (181..365),
     *         otherwise "NULL"
     */
    def urgency_covertime(time_diff_data : String):String = {
      // Normalize: treat a null reference the same as an empty string.
      val raw = Option(time_diff_data).map(_.trim).getOrElse("")
      // BUG FIX: the original guard used && — a value can never equal both ""
      // and "null" at once, so the guard never fired and toInt threw
      // NumberFormatException on blank/"null" input. || is the intended test.
      if (raw.isEmpty || raw.equalsIgnoreCase("null")) {
        "NULL"
      } else {
        // Non-numeric input now degrades to "NULL" instead of crashing the job.
        scala.util.Try(raw.toInt).toOption match {
          case Some(d) if 0 < d && d <= 30    => "In Last One Month"
          case Some(d) if 30 < d && d <= 90   => "In Last Three Month"
          case Some(d) if 90 < d && d <= 180  => "In Last Six Month"
          case Some(d) if 180 < d && d <= 365 => "In Last Year"
          case _                              => "NULL"
        }
      }
    }
}
