package com.lenovo.userprofile

import java.text.SimpleDateFormat
import java.util.Date

import com.lenovo.function.Utils
import com.lenovo.jdbc.DBHelper
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{StringType, StructField, StructType}

object ETL_behavior_week_1 {
  /**
   * Weekly user-behavior ETL job.
   *
   * Reads the last 180 days of incident tickets from Hive
   * (ccsd.itsm_dw_incident joined to ccsd.ad_user_upp, so only known AD
   * accounts are profiled) and writes per-user labels into the "behavior"
   * column family of the upp:upp_user_profile HBase table:
   *   - app_ticket_num / not_app_ticket_num: Low/Medium/High vs. population average
   *   - ticket_type: the user's dominant ticket category ("Average" on a tie)
   *   - ticket_distribution: per-category ratio labels, comma-joined
   */
  def main(args: Array[String]): Unit = {

    val utils = new Utils
    val sparkSession = SparkSession.builder.master("yarn").appName("ETL_behavior_week_1").enableHiveSupport().getOrCreate()

    // HBase sink configuration: every saveAsHadoopDataset below targets this table.
    val hbase_conf = HBaseConfiguration.create()
    val tablename = "upp:upp_user_profile"
    val jobConf = new JobConf(hbase_conf)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, tablename)

    // Tickets from the last 180 days, excluding monitoring-generated noise and
    // customer-voice records; the user key is the lower-cased email local part
    // (before '@'), joined against AD user names so only real accounts remain.
    val behavior_all_df = sparkSession.sql("select t1.product_categorization_tier_1 product_categorization_tier_1,t1.email email from (select product_categorization_tier_1,LOWER(substring_index(internetemail,'@',1)) email from ccsd.itsm_dw_incident dw where LOWER(internetemail) not like '%monitor%' AND reported_sourcename !='Monitor' AND categorization_tier_1 !='Customer Voice' AND ( product_categorization_tier_1 =='Applications - Strategic' OR product_categorization_tier_1 =='Applications - Non Strategic China' OR product_categorization_tier_1 =='Applications - Manufactory' OR product_categorization_tier_1 =='Communication & Collaboration' OR product_categorization_tier_1 =='DC Operation' OR product_categorization_tier_1 =='Facility' OR product_categorization_tier_1 =='Network' OR product_categorization_tier_1 =='Security' OR product_categorization_tier_1 =='Server' OR product_categorization_tier_1 =='Storage' OR product_categorization_tier_1 =='Voice' OR product_categorization_tier_1 =='Desktop & Office Software' OR product_categorization_tier_1 =='Hardware Maintenance' OR product_categorization_tier_1 =='Infrasturcture Application' OR product_categorization_tier_1 =='Others' )AND internetemail is not null AND internetemail!='' AND LOWER(internetemail)!= 'null'  AND date_format(dw.submitdate,'yyyy-MM-dd') > date_sub('" + utils.getDay() + " ','180') ) t1 join ccsd.ad_user_upp ad on t1.email = LOWER(ad.user_name) ")


    behavior_all_df.createTempView("behavior_all_tmp")

    val app_ticket_num_df = sparkSession.sql("select product_categorization_tier_1,email from behavior_all_tmp")

    // Tier-1 product categories grouped into the four label buckets.
    val Business_Application_list = List("Applications - Strategic","Applications - Non Strategic China","Applications - Manufactory")
    val Connunication_list = List("Communication & Collaboration")
    val Network_Voice_list=List("DC Operation","Facility","Network","Security","Server","Storage","Voice")
    val Desktop_Software_list = List("Desktop & Office Software", "Hardware Maintenance","Infrasturcture Application","Others")


    // One record per ticket keyed by email: +1 for application tickets,
    // -1 for everything else (the sign separates the two populations below).
    val app_ticket_rdd= app_ticket_num_df.rdd.filter(row=>{ row !=null && row(1)!=null}).map(row=>{
      if (Business_Application_list.contains(row(0)))
        (row(1),1)
      else
        (row(1),-1)
    })

    // Average tickets per distinct user, computed separately for app (+1)
    // and non-app (-1) tickets: total ticket count / distinct-user count.
    val app_ticket_num_avg = (app_ticket_rdd.filter(item=>item._2 != -1).count().toDouble)/(app_ticket_rdd.filter(item=>item._2 != -1).reduceByKey(_+_).count().toDouble)
    val not_app_ticket_num_avg = (app_ticket_rdd.filter(item=>item._2 != 1).count().toDouble)/(app_ticket_rdd.filter(item=>item._2 != 1).reduceByKey(_+_).count().toDouble)

    // Low/Medium/High multiplier ranges, e.g. "0.5-2", loaded from config DB.
    val app_conf = DBHelper.getAppConf
    val not_app_conf = DBHelper.getNotAppConf

    // Per-user application-ticket count, labeled relative to the average.
    app_ticket_rdd.filter(_._2 != -1).reduceByKey(_+_).map(col =>{
      val put = new Put(Bytes.toBytes(null2Str(col._1)))
      //app_ticket_num
      put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("app_ticket_num"), Bytes.toBytes(freq(col._2,app_ticket_num_avg,app_conf)))
      (new ImmutableBytesWritable, put)
    }).saveAsHadoopDataset(jobConf)

    // Per-user non-application-ticket count (stored as negative sums, hence abs).
    app_ticket_rdd.filter(_._2 != 1).reduceByKey(_+_).map(col =>{
      val put = new Put(Bytes.toBytes(null2Str(col._1)))
      //not_app_ticket_num
      put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("not_app_ticket_num"), Bytes.toBytes(freq(Math.abs(col._2),not_app_ticket_num_avg,not_app_conf)))
      (new ImmutableBytesWritable, put)
    }).saveAsHadoopDataset(jobConf)


    // Ticket count per (category, user); categories are collapsed into the four
    // buckets via an "email#bucket" composite key, then re-expanded into rows.
    val ticket_type_df = sparkSession.sql("select product_categorization_tier_1,email,count(1) from behavior_all_tmp group by product_categorization_tier_1,email ")
    ticket_type_df.show()
    val ticket_type_rdd = ticket_type_df.rdd.map(row =>{
      if (Business_Application_list.contains(row(0).toString))
        (row(1).toString + "#" + "Business_Application", (row(2).toString.toInt))
      else if (Connunication_list.contains(row(0).toString))
        (row(1)+"#"+"Communication",row(2).toString.toInt)
      else if (Network_Voice_list.contains(row(0).toString))
        (row(1)+"#"+"Network_Voice",row(2).toString.toInt)
      else if (Desktop_Software_list.contains(row(0).toString))
        (row(1)+"#"+"Desktop_Software",row(2).toString.toInt)
      else {
        (row(1)+"#"+" ",0)
      }}).reduceByKey(_+_).map(x =>{Row(x._1.split("#")(0),x._1.split("#")(1),x._2+"")})

    val schemaString = "email type num"
    val schema = StructType (schemaString.split(" ").map(fieldName => StructField(fieldName,StringType,true)))
    val  ticket_type_SchemaRDD = sparkSession.createDataFrame(ticket_type_rdd, schema)
    ticket_type_SchemaRDD.createTempView("ticket_type_tmp")
    // Top-2 buckets per user: a.* is rank 1, b.num is rank 2 (null when the
    // user only has one bucket). NOTE(review): num is a STRING column here, so
    // the ORDER BY is lexicographic, not numeric — confirm this is intended.
    val ticket_type_label_df = sparkSession.sql("SELECT a.email,a.type,a.num,b.num FROM  (SELECT t.email email,t.type type,t.num num FROM  (SELECT email,type,num,row_number() over ( partition by email order by num desc ) rn  FROM ticket_type_tmp) t where t.rn =1) a left join (SELECT  t.email email,t.type type,t.num num FROM ( SELECT email,type,num,row_number() over ( partition by email order by num desc ) rn  FROM ticket_type_tmp) t where t.rn =2) b on (a.email = b.email)")
    ticket_type_label_df.rdd.filter(row=>{ row != null && row.length>0 && !"".equals(row(0))}).map(col=>{

      val put = new Put(Bytes.toBytes(null2Str(col(0))))
      //ticket_type: "Average" when the top two buckets tie, else the top bucket.
      // Bug fix: was (col(2)+"".toLowerCase), which lower-cased the empty
      // literal instead of the value, so "NULL"/"Null" were never filtered.
      if (col(2)==col(3)&& !("").equals(col(2))&& col(2)!= null && (col(2)+"").toLowerCase!= "null"){
        println("num1 : " + col(2) +" num2 : "+ col(3))
        put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("ticket_type"), Bytes.toBytes("Average"))
      }
      else
        put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("ticket_type"), Bytes.toBytes(col(1).toString))
      (new ImmutableBytesWritable, put)
    }).saveAsHadoopDataset(jobConf)

    // Percentage thresholds "low-high-top" for the ratio labels.
    val ticket_category_ratio_conf = DBHelper.getTicket_Category_Ratio_Conf
    // Each user's per-bucket share of their total tickets.
    val ticket_ticket_distribution_df = sparkSession.sql("select t.email,b.type,b.num,t.all from( (select email,sum(num) as all from ticket_type_tmp group by ticket_type_tmp.email )t join ticket_type_tmp as b on t.email = b.email )")
    ticket_ticket_distribution_df.show()
    ticket_ticket_distribution_df.rdd.filter(row=>{row!=null && row.length>0 && row(1)!=null && row(2)!=null && row(3)!=null}).map(col => {
      val per = (col(2).toString.toDouble / col(3).toString.toDouble)
      val agr = col(1) + "#" + getTicketRation(per,ticket_category_ratio_conf)
      (col(0)+"",agr)
    }).reduceByKey(_+","+_).filter(_._1.toString.length>0).map(col=>{
      val put = new Put(Bytes.toBytes(col._1+""))
      //ticket_distribution
      put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("ticket_distribution"), Bytes.toBytes(col._2))
      (new ImmutableBytesWritable, put)
    }).saveAsHadoopDataset(jobConf)
    sparkSession.stop()
  }
  //null2Str function
  /**
   * Normalizes an arbitrary (possibly null) value to a non-null string.
   * Both null and the empty string map to ""; everything else is its toString.
   */
  def null2Str(data: Any): String =
    if (data == null || "".equals(data)) "" else data.toString
  //freq function
  /**
   * Buckets a ticket count into a frequency label relative to the population
   * average.
   *
   * @param data  the user's ticket count
   * @param avg   the average ticket count across all users
   * @param times dash-separated multipliers "low-high"; the effective
   *              thresholds are avg*low and avg*high
   * @return "Low" below avg*low, "Medium" in [avg*low, avg*high], "High"
   *         above avg*high, "" otherwise (only reachable when data is NaN)
   */
  def freq(data: Double, avg: Double, times: String): String = {
    // Parse the two multipliers once instead of re-splitting per comparison.
    val bounds = times.split("-")
    val low = avg * bounds(0).toDouble
    val high = avg * bounds(1).toDouble
    if (data < low) "Low"
    else if (data >= low && data <= high) "Medium"
    else if (data > high) "High"
    else "" // all comparisons false: data is NaN
  }


  //getTicketRation function
  /**
   * Buckets a per-category ticket ratio into a distribution label.
   *
   * @param data  ratio of category tickets to total tickets, expected in [0,1]
   * @param times dash-separated percentage thresholds "low-high-top"
   * @return "Low" below low%, "Medium" in [low%, high%], "High" in
   *         (high%, top%], "Ultrahigh" above top%
   */
  def getTicketRation(data: Double, times: String): String = {
    // Parse the three thresholds once instead of re-splitting per comparison.
    val t = times.split("-")
    val pct = data * 100
    // Bug fix: the old first branch required 0 < data, so a zero ratio failed
    // every branch and fell through to "Ultrahigh"; anything below the low
    // threshold is now correctly labeled "Low".
    if (pct < t(0).toDouble) "Low"
    else if (pct >= t(0).toDouble && pct <= t(1).toDouble) "Medium"
    else if (pct > t(1).toDouble && pct <= t(2).toDouble) "High"
    else "Ultrahigh"
  }

}
