package com.lenovo.userprofile

import java.text.SimpleDateFormat
import java.util.Date

import com.lenovo.function.Utils
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.sql.{DataFrame, SparkSession}

object ETL_system_mon_2 {

  /**
   * ETL job: derives a per-user "operation frequency" label for six backend systems
   * (ECC, SRM-EBP, SRM-SUS, PP, BW, GTS) from their Hive audit-log tables and writes
   * the labels into HBase table `upp:upp_user_profile`, column family "system".
   *
   * Per system the logic is:
   *   1. users with transaction activity in the last 180 days are counted and bucketed
   *      as Low / Medium / High against the population average (see [[level]]);
   *   2. users seen in the last 90 days only with empty/null transaction codes are
   *      written as "Inactive".
   *
   * NOTE(review): fixes two copy-paste defects of the previous version — the SRM-SUS
   * section now uses its own DataFrame (it previously reused the SRM-EBP one), and the
   * GTS inactive branch writes to "GTS_freq" (previously "GST_freq").
   */
  def main(args: Array[String]): Unit = {

    // Today as a yyyyMMdd number; used below to keep only accounts whose validity
    // date (gltgb / expire_date) lies in the future.
    val dateFormat: SimpleDateFormat = new SimpleDateFormat("yyyyMMdd")
    val now_date_num = dateFormat.format(new Date()).toLong
    println(now_date_num)

    val util = new Utils
    val sparkSession = SparkSession.builder.master("yarn").appName("ETL_system_mon_2").enableHiveSupport().getOrCreate()

    // Shared HBase sink: every section below emits Puts into the same table.
    val hbase_conf = HBaseConfiguration.create()
    val tablename = "upp:upp_user_profile"
    val jobConf = new JobConf(hbase_conf)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, tablename)

    // ---------- ECC operation frequency ----------
    // Half a year of audit data, restricted to known AD users. //egp_audit_log
    sparkSession.sql("SELECT LOWER(egp.user_name) user_name,egp.exec_date exec_date,egp.transaction_code as transaction_code from ccsd.egp_audit_log egp join ccsd.ad_user_upp ad on lower(ad.user_name) = lower(egp.user_name) where  date_format(cast(unix_timestamp(egp.exec_date,'yyyyMMdd') as timestamp),'YYYY-MM-dd') > date_sub('"+util.getDay()+"',"+180+")").createTempView("egp_audit_log_half_year")
    val ECC_freq_df = sparkSession.sql("SELECT LOWER(egp.user_name) from egp_audit_log_half_year egp join ccsd.egp301_usr02 egp02 on lower(egp.user_name) = trim(lower(egp02.bname)) where egp.user_name is not null and egp.user_name != '' AND lower(egp.user_name)!= 'null' AND egp.transaction_code is not null AND egp.transaction_code != '' AND lower(egp.transaction_code)!= 'null'  AND cast(egp02.gltgb as bigint) >" + now_date_num )
    ECC_freq_df.show()
    writeFreqLevels(ECC_freq_df, "ECC_freq", jobConf)

    // Three months of data; users seen only without a transaction code are inactive. //egp_audit_log
    sparkSession.sql("SELECT LOWER(egp.user_name) user_name,egp.exec_date exec_date,egp.transaction_code as transaction_code from ccsd.egp_audit_log egp join ccsd.ad_user_upp ad on lower(ad.user_name) = lower(egp.user_name) where  date_format(cast(unix_timestamp(egp.exec_date,'yyyyMMdd') as timestamp),'YYYY-MM-dd') > date_sub('"+util.getDay()+"',"+90+")").createTempView("egp_audit_log_3_month")
    val ECC_ALL_df = sparkSession.sql("SELECT DISTINCT(LOWER(egp.user_name)) from egp_audit_log_3_month egp join ccsd.egp301_usr02 egp02 on lower(egp.user_name) = trim(lower(egp02.bname)) where egp.user_name is not null and egp.user_name != '' AND lower(egp.user_name)!= 'null' AND ( egp.transaction_code is null OR egp.transaction_code == '' OR lower(egp.transaction_code)== 'null' ) AND cast(egp02.gltgb as bigint) >" + now_date_num)
    writeInactive(ECC_ALL_df, "ECC_freq", jobConf)

    // ---------- SRM-EBP operation frequency ----------
    // Half a year of audit data. //mgp_audit_log
    sparkSession.sql("SELECT LOWER(egp.user_name) user_name,egp.exec_date exec_date,egp.transaction_code as transaction_code from ccsd.mgp_audit_log egp join ccsd.ad_user_upp ad on lower(ad.user_name) = lower(egp.user_name) where  date_format(cast(unix_timestamp(egp.exec_date,'yyyyMMdd') as timestamp),'YYYY-MM-dd') > date_sub('"+util.getDay()+"',"+180+")").createTempView("srm_ebp_half_year")
    val SRM_EBP_freq_df = sparkSession.sql("SELECT LOWER(egp.user_name) from srm_ebp_half_year egp join ccsd.mgp301_usr02 egp02 on lower(egp.user_name) = trim(lower(egp02.bname))  where egp.user_name is not null and egp.user_name != '' AND lower(egp.user_name)!= 'null' AND egp.transaction_code is not null AND egp.transaction_code != '' AND lower(egp.transaction_code)!= 'null' AND cast(egp02.gltgb as bigint) >" + now_date_num )
    writeFreqLevels(SRM_EBP_freq_df, "SRM_EBP_freq", jobConf)

    // Three months of data for the Inactive flag.
    sparkSession.sql("SELECT LOWER(egp.user_name) user_name,egp.exec_date exec_date,egp.transaction_code as transaction_code from ccsd.mgp_audit_log egp join ccsd.ad_user_upp ad on lower(ad.user_name) = lower(egp.user_name) where  date_format(cast(unix_timestamp(egp.exec_date,'yyyyMMdd') as timestamp),'YYYY-MM-dd') > date_sub('"+util.getDay()+"',"+90+")").createTempView("srm_ebp_3_month")
    val SRM_EBP_ALL_df = sparkSession.sql("SELECT DISTINCT(LOWER(egp.user_name)) from srm_ebp_3_month egp join ccsd.mgp301_usr02 egp02 on lower(egp.user_name) = trim(lower(egp02.bname)) where egp.user_name is not null and egp.user_name != '' AND lower(egp.user_name)!= 'null'  AND ( egp.transaction_code is null OR egp.transaction_code == '' OR lower(egp.transaction_code)== 'null' ) AND cast(egp02.gltgb as bigint) >" + now_date_num)
    SRM_EBP_ALL_df.show()
    writeInactive(SRM_EBP_ALL_df, "SRM_EBP_freq", jobConf)

    // ---------- SRM-SUS operation frequency ----------
    // Half a year of audit data. //rgp_audit_log
    sparkSession.sql("SELECT LOWER(egp.user_name) user_name,egp.exec_date exec_date,egp.transaction_code as transaction_code from ccsd.rgp_audit_log egp join ccsd.ad_user_upp ad on lower(ad.user_name) = lower(egp.user_name) where  date_format(cast(unix_timestamp(egp.exec_date,'yyyyMMdd') as timestamp),'YYYY-MM-dd') > date_sub('"+util.getDay()+"',"+180+")").createTempView("srm_sus_half_year")
    // FIX(review): select LOWER(...) so SRM-SUS rowkeys match the lowercase keys used by
    // every other section (the old query wrote mixed-case user names).
    val SRM_SUS_freq_df = sparkSession.sql("SELECT LOWER(egp.user_name) from srm_sus_half_year egp join ccsd.rgp301_usr02 egp02 on lower(egp.user_name) = trim(lower(egp02.bname)) where egp.user_name is not null and egp.user_name != '' AND lower(egp.user_name)!= 'null' AND egp.transaction_code is not null AND egp.transaction_code != '' AND lower(egp.transaction_code)!= 'null' AND cast(egp02.gltgb as bigint) >" + now_date_num )
    // FIX(review): the old code computed the average and the Puts from SRM_EBP_freq_df,
    // so SRM_SUS_freq was populated with SRM-EBP data and this DataFrame was never used.
    writeFreqLevels(SRM_SUS_freq_df, "SRM_SUS_freq", jobConf)

    // Three months of data for the Inactive flag.
    sparkSession.sql("SELECT LOWER(egp.user_name) user_name,egp.exec_date exec_date,egp.transaction_code as transaction_code from ccsd.rgp_audit_log egp join ccsd.ad_user_upp ad on lower(ad.user_name) = lower(egp.user_name) where  date_format(cast(unix_timestamp(egp.exec_date,'yyyyMMdd') as timestamp),'YYYY-MM-dd') > date_sub('"+util.getDay()+"',"+90+")").createTempView("srm_sus_3_month")
    val SRM_SUS_ALL_df = sparkSession.sql("SELECT DISTINCT(LOWER(egp.user_name)) from srm_sus_3_month egp join ccsd.rgp301_usr02 egp02 on lower(egp.user_name) = trim(lower(egp02.bname))  where egp.user_name is not null and egp.user_name != '' AND lower(egp.user_name)!= 'null' AND ( egp.transaction_code is null OR egp.transaction_code == '' OR lower(egp.transaction_code)== 'null' ) AND cast(egp02.gltgb as bigint) >" + now_date_num)
    SRM_SUS_ALL_df.show()
    writeInactive(SRM_SUS_ALL_df, "SRM_SUS_freq", jobConf)

    // ---------- PP operation frequency ----------
    // Half a year of portal access logs; expiry is a unix timestamp here, not gltgb. //prdmdmn_pt_app_access_log
    sparkSession.sql("SELECT LOWER(egp.login_name) user_name,egp.visit_time exec_date,egp.app_name as transaction_code from ccsd.prdmdmn_pt_app_access_log egp join ccsd.ad_user_upp ad on lower(ad.user_name) = lower(egp.login_name) where date_format(egp.visit_time,'YYYY-MM-dd') > date_sub('"+util.getDay()+"',"+180+")").createTempView("pp_half_year")
    val PP_freq_df = sparkSession.sql("SELECT LOWER(egp.user_name) from pp_half_year egp join ccsd.prdmdmn_pt_user_account egp02 on lower(egp.user_name) = trim(lower(egp02.itcode)) where egp.user_name is not null and egp.user_name != '' AND lower(egp.user_name)!= 'null' AND egp.transaction_code is not null AND egp.transaction_code != '' AND lower(egp.transaction_code)!= 'null' AND cast(from_unixtime(cast(egp02.expire_date AS bigint),'yMd') as bigint) >" + now_date_num )
    writeFreqLevels(PP_freq_df, "PP_freq", jobConf)

    // Three months of data for the Inactive flag.
    sparkSession.sql("SELECT LOWER(egp.login_name) user_name,egp.visit_time exec_date,egp.app_name as transaction_code from ccsd.prdmdmn_pt_app_access_log egp join ccsd.ad_user_upp ad on lower(ad.user_name) = lower(egp.login_name) where date_format(egp.visit_time,'YYYY-MM-dd') > date_sub('"+util.getDay()+"',"+90+")").createTempView("pp_3_month")
    val PP_ALL_df = sparkSession.sql("SELECT DISTINCT(LOWER(egp.user_name)) from pp_3_month egp join ccsd.prdmdmn_pt_user_account egp02 on lower(egp.user_name) = trim(lower(egp02.itcode)) where egp.user_name is not null and egp.user_name != '' AND lower(egp.user_name)!= 'null'  AND ( egp.transaction_code is null OR egp.transaction_code == '' OR lower(egp.transaction_code)== 'null' ) AND cast(from_unixtime(cast(egp02.expire_date AS bigint),'yMd') as bigint) >" + now_date_num)
    PP_ALL_df.show()
    writeInactive(PP_ALL_df, "PP_freq", jobConf)

    // ---------- BW operation frequency ----------
    // No account-validity table is available for BW, so there is no usr02-style join. //bgp_audit_log
    sparkSession.sql("SELECT LOWER(egp.user_name) user_name,egp.exec_date exec_date,egp.transaction_code as transaction_code from ccsd.bgp_audit_log egp join ccsd.ad_user_upp ad on lower(ad.user_name) = lower(egp.user_name) where  date_format(cast(unix_timestamp(egp.exec_date,'yyyyMMdd') as timestamp),'YYYY-MM-dd') > date_sub('"+util.getDay()+"',"+180+")").createTempView("bw_half_year")
    val BW_freq_df = sparkSession.sql("SELECT LOWER(egp.user_name) from bw_half_year egp where egp.user_name is not null and egp.user_name != '' AND lower(egp.user_name)!= 'null' AND egp.transaction_code is not null AND egp.transaction_code != '' AND lower(egp.transaction_code)!= 'null' " )
    writeFreqLevels(BW_freq_df, "BW_freq", jobConf)

    // Three months of data for the Inactive flag.
    sparkSession.sql("SELECT LOWER(egp.user_name) user_name,egp.exec_date exec_date,egp.transaction_code as transaction_code from ccsd.bgp_audit_log egp join ccsd.ad_user_upp ad on lower(ad.user_name) = lower(egp.user_name) where  date_format(cast(unix_timestamp(egp.exec_date,'yyyyMMdd') as timestamp),'YYYY-MM-dd') > date_sub('"+util.getDay()+"',"+90+")").createTempView("bw_3_month")
    val BW_ALL_df = sparkSession.sql("SELECT DISTINCT(LOWER(egp.user_name)) from bw_3_month egp where egp.user_name is not null and egp.user_name != '' AND lower(egp.user_name)!= 'null' AND ( egp.transaction_code is null OR egp.transaction_code == '' OR lower(egp.transaction_code)== 'null' ) ")
    BW_ALL_df.show()
    writeInactive(BW_ALL_df, "BW_freq", jobConf)

    // ---------- GTS operation frequency ----------
    // Like BW, no account-validity join. //ggp_audit_log
    sparkSession.sql("SELECT LOWER(egp.user_name) user_name,egp.exec_date exec_date,egp.transaction_code as transaction_code from ccsd.ggp_audit_log egp join ccsd.ad_user_upp ad on lower(ad.user_name) = lower(egp.user_name) where  date_format(cast(unix_timestamp(egp.exec_date,'yyyyMMdd') as timestamp),'YYYY-MM-dd') > date_sub('"+util.getDay()+"',"+180+")").createTempView("gts_half_year")
    val GTS_freq_df = sparkSession.sql("SELECT LOWER(egp.user_name) from gts_half_year egp where egp.user_name is not null and egp.user_name != '' AND lower(egp.user_name)!= 'null' AND egp.transaction_code is not null AND egp.transaction_code != '' AND lower(egp.transaction_code)!= 'null' ")
    writeFreqLevels(GTS_freq_df, "GTS_freq", jobConf)

    // Three months of data for the Inactive flag.
    sparkSession.sql("SELECT LOWER(egp.user_name) user_name,egp.exec_date exec_date,egp.transaction_code as transaction_code from ccsd.ggp_audit_log egp join ccsd.ad_user_upp ad on lower(ad.user_name) = lower(egp.user_name) where  date_format(cast(unix_timestamp(egp.exec_date,'yyyyMMdd') as timestamp),'YYYY-MM-dd') > date_sub('"+util.getDay()+"',"+90+")").createTempView("gts_3_month")
    val GTS_ALL_df = sparkSession.sql("SELECT DISTINCT(LOWER(egp.user_name)) from gts_3_month egp where egp.user_name is not null and egp.user_name != '' AND lower(egp.user_name)!= 'null'  AND ( egp.transaction_code is null OR egp.transaction_code == '' OR lower(egp.transaction_code)== 'null' ) " )
    GTS_ALL_df.show()
    // FIX(review): qualifier was "GST_freq", orphaning inactive users in a column the
    // active branch ("GTS_freq") never touches.
    writeInactive(GTS_ALL_df, "GTS_freq", jobConf)

    sparkSession.stop()
  }

  /**
   * Counts rows per user in `freqDf` (a single lowercase user_name column), buckets each
   * count against the population average via [[level]], and writes one Put per user to
   * HBase column `system:qualifier`.
   *
   * An empty DataFrame yields avg = NaN, but then no rows are written, so it is harmless.
   */
  private def writeFreqLevels(freqDf: DataFrame, qualifier: String, jobConf: JobConf): Unit = {
    val avg = freqDf.rdd.count().toDouble / freqDf.rdd.map(row => (row(0), 1)).reduceByKey(_ + _).count().toDouble
    freqDf.rdd.map(row => (row(0), 1)).reduceByKey(_ + _).map { case (user, cnt) =>
      println(user.toString + " : " + level(cnt, avg))
      val put = new Put(Bytes.toBytes(user.toString))
      put.addColumn(Bytes.toBytes("system"), Bytes.toBytes(qualifier), Bytes.toBytes(level(cnt, avg)))
      (new ImmutableBytesWritable, put)
    }.saveAsHadoopDataset(jobConf)
  }

  /**
   * Writes "Inactive" to HBase column `system:qualifier` for every user in `df`
   * (a single lowercase user_name column).
   */
  private def writeInactive(df: DataFrame, qualifier: String, jobConf: JobConf): Unit = {
    df.rdd.map(row => row(0)).map { user =>
      val put = new Put(Bytes.toBytes(user.toString))
      put.addColumn(Bytes.toBytes("system"), Bytes.toBytes(qualifier), Bytes.toBytes("Inactive"))
      (new ImmutableBytesWritable, put)
    }.saveAsHadoopDataset(jobConf)
  }

  /**
   * Buckets a user's activity count against the population average:
   * at or below average -> "Low"; up to twice the average -> "Medium"; else "High".
   */
  def level(data: Double, avg: Double): String =
    if (data <= avg) "Low"
    else if (data <= avg * 2) "Medium"
    else "High"
}
