package com.lenovo.userprofile

import com.lenovo.function.Utils
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf

/**
 * Weekly behavior ETL job.
 *
 * Reads ITSM data from Hive (database `ccsd`) and writes per-user behavior
 * attributes into the HBase table `upp:upp_user_profile`, column family
 * `behavior`. Three attributes are produced:
 *
 *   1. `way_touch_it` — the contact channel (`reported_sourcename`) each user
 *      used most often for incidents in the last 180 days.
 *   2. `ticket_sr`    — flag "Y" for users who raised SR function-enhance tickets.
 *   3. `ticket_qi`    — flag "Y" for users who raised SR general-info tickets.
 *
 * The HBase row key is the lower-cased itcode (email local part).
 */
object ETL_behavior_week_2 {

  def main(args: Array[String]): Unit = {

    // Utils supplies the reference date (getDay) for the 180-day window.
    val util = new Utils

    val sparkSession = SparkSession.builder.master("yarn").appName("ETL_behavior_week_2").enableHiveSupport().getOrCreate()

    // HBase output configuration: all three result sets are written to the
    // same table through the classic mapred TableOutputFormat.
    val hbase_conf = HBaseConfiguration.create()
    hbase_conf.set("hbase.zookeeper.property.clientPort", "2181")

    val tablename = "upp:upp_user_profile"
    val jobConf = new JobConf(hbase_conf)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, tablename)

    // --- 1. way_touch_it -------------------------------------------------
    // Incidents from the last 180 days, restricted to real users (join with
    // ccsd.ad_user_upp) and to IT product categories; monitoring noise and
    // 'null' placeholder strings are filtered out.
    // NOTE: the original query had a stray space inside the date literal
    // (date_sub('<day> ','180')), which breaks the string-to-date cast and
    // silently voids the 180-day filter; the space is removed here.
    val way_touch_it_all_df = sparkSession.sql("select t1.email,t1.name from (select LOWER(substring_index(dw.internetemail,'@',1)) email,dw.reported_sourcename name from ccsd.itsm_dw_incident dw where LOWER(internetemail) not like '%monitor%' AND reported_sourcename !='Monitor' AND categorization_tier_1 !='Customer Voice' AND ( product_categorization_tier_1 =='Applications - Strategic' OR product_categorization_tier_1 =='Applications - Non Strategic China' OR product_categorization_tier_1 =='Applications - Manufactory' OR product_categorization_tier_1 =='Communication & Collaboration' OR product_categorization_tier_1 =='DC Operation' OR product_categorization_tier_1 =='Facility' OR product_categorization_tier_1 =='Network' OR product_categorization_tier_1 =='Security' OR product_categorization_tier_1 =='Server' OR product_categorization_tier_1 =='Storage' OR product_categorization_tier_1 =='Voice' OR product_categorization_tier_1 =='Desktop & Office Software' OR product_categorization_tier_1 =='Hardware Maintenance' OR product_categorization_tier_1 =='Infrasturcture Application' OR product_categorization_tier_1 =='Others') and dw.internetemail is not null AND  LOWER(dw.internetemail) != 'null' AND dw.reported_sourcename is not null AND upper(dw.reported_sourcename) != 'NULL' AND date_format(dw.submitdate,'yyyy-MM-dd') > date_sub('" + util.getDay() + "','180') ) t1 join ccsd.ad_user_upp ad on t1.email = LOWER(ad.user_name)")

    // Count (email, channel) occurrences. The composite key uses '#' as a
    // separator — assumes neither email nor channel name contains '#'.
    val way_touch_it_rdd = way_touch_it_all_df.rdd
      .map(row => (row(0) + "#" + row(1), 1))
      .reduceByKey(_ + _)
      .map { case (key, count) =>
        val parts = key.split("#")   // split once instead of twice per record
        Row(parts(0), parts(1), count.toString)
      }

    val way_touch_it_schemaString = "email,reported_sourcename,num"
    val way_touch_it_schema = StructType(way_touch_it_schemaString.split(",").map(fieldName => StructField(fieldName, StringType, true)))
    val way_touch_it_counts_df = sparkSession.createDataFrame(way_touch_it_rdd, way_touch_it_schema)
    way_touch_it_counts_df.show()
    way_touch_it_counts_df.createTempView("way_touch_it_tmp")

    // Keep only the single most-used channel per user (row_number over the
    // numeric count, descending).
    val day_time_interval_resultDF = sparkSession.sql("select t.email,t.reported_sourcename,t.num from (select email,reported_sourcename,num,row_number() over( partition by email order by CAST(num AS int) desc ) rn  from way_touch_it_tmp) t where t.rn <=1")
    day_time_interval_resultDF.toDF().show()
    day_time_interval_resultDF.rdd.map(row => {
      val put = new Put(Bytes.toBytes(null2Str(row(0))))
      // behavior:way_touch_it = most frequent contact channel
      put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("way_touch_it"), Bytes.toBytes(row(1).toString))
      (new ImmutableBytesWritable, put)
    }).saveAsHadoopDataset(jobConf)

    // --- 2. ticket_sr ----------------------------------------------------
    // Users who ever submitted an SR function-enhance request get a "Y" flag.
    val ticket_sr_df = sparkSession.sql("SELECT lower(ad.user_name) FROM ccsd.ad_user_upp ad JOIN  (SELECT DISTINCT substring_index(lower(dw_srf.requesteremail), '@', 1) itcode   FROM ccsd.itsm_dw_srfunctionenhance dw_srf GROUP BY substring_index(lower(dw_srf.requesteremail), '@', 1)) t on lower(ad.user_name) = t.itcode where ad.user_name is not null and ad.user_name !='' and lower(user_name) != 'null'")

    ticket_sr_df.rdd.map(row => {
      val put = new Put(Bytes.toBytes(null2Str(row(0))))
      // behavior:ticket_sr = "Y" (presence flag only)
      put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("ticket_sr"), Bytes.toBytes("Y"))
      (new ImmutableBytesWritable, put)
    }).saveAsHadoopDataset(jobConf)

    // --- 3. ticket_qi ----------------------------------------------------
    // Users who ever submitted an SR general-info request get a "Y" flag.
    val ticket_qi_df = sparkSession.sql("SELECT lower(ad.user_name) FROM ccsd.ad_user_upp ad JOIN  (SELECT DISTINCT substring_index(lower(dw_srg.requesteremail), '@', 1) itcode   FROM ccsd.itsm_dw_srgeneralinfo dw_srg GROUP BY substring_index(lower(dw_srg.requesteremail), '@', 1)) t on lower(ad.user_name) = t.itcode where ad.user_name is not null and ad.user_name !='' and lower(user_name) != 'null'")
    ticket_qi_df.rdd.map(row => {
      val put = new Put(Bytes.toBytes(null2Str(row(0))))
      // behavior:ticket_qi = "Y" (presence flag only)
      put.addColumn(Bytes.toBytes("behavior"), Bytes.toBytes("ticket_qi"), Bytes.toBytes("Y"))
      (new ImmutableBytesWritable, put)
    }).saveAsHadoopDataset(jobConf)

    //todo inv_analysis

    // Release Spark resources once all HBase writes have completed.
    sparkSession.stop()
  }

  /**
   * Maps a nullable/placeholder string to a yes/no flag:
   * "N" for null, empty, or the literal string "null" (any case); "Y" otherwise.
   */
  def yes2no(str: String): String =
    if (str == null || str.isEmpty || str.equalsIgnoreCase("null")) "N" else "Y"

  /**
   * Null-safe string conversion: null becomes "", anything else its toString.
   * Used to build HBase row keys from possibly-null Row values.
   */
  def null2Str(data: Any): String =
    if (data == null) "" else data.toString
}
