package com.lenovo.userprofile

import com.lenovo.jdbc.DBHelper
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.sql.SparkSession


object ETL_L0_week_1 {

  /**
   * Weekly L0 user-profile ETL.
   *
   * Reads EN + CN FAQ view logs from Hive (`ccsd.l0_faq_en` / `ccsd.l0_faq_cn`),
   * joins against AD users (`ccsd.ad_user_upp`), and writes two columns into the
   * `l0_document_property` family of HBase table `upp:upp_user_profile`:
   *
   *   - `reading_times`: High / Medium / Low bucket of the user's rank by
   *     distinct FAQ-view count; bucket sizes come from `DBHelper.getL0Conf`
   *     (a "low-medium-high" dash-separated ratio string).
   *   - `doc_type`: the user's most-viewed top-level subcategory (the part of
   *     `subcategory` before "--"), or "Average" when the top two counts tie.
   */
  def main(args: Array[String]): Unit = {
    val sparkSession = SparkSession.builder.master("yarn").appName("ETL_L0_week_1").enableHiveSupport().getOrCreate()

    try {
      val hbase_conf = HBaseConfiguration.create()
      //hbase_conf.set("hbase.zookeeper.property.clientPort", "2181")
      val tablename = "upp:upp_user_profile"
      val jobConf = new JobConf(hbase_conf)

      jobConf.setOutputFormat(classOf[TableOutputFormat])
      jobConf.set(TableOutputFormat.OUTPUT_TABLE, tablename)

      // Union of EN and CN FAQ view logs, exposed as a temp view for the queries below.
      sparkSession.sql("select useraccount,faqid,created,subcategory from ccsd.l0_faq_en union select useraccount,faqid,created,subcategory from ccsd.l0_faq_cn").toDF().createTempView("l0_faq_all")

      // Per-user distinct view count, ranked descending; restricted to known AD users.
      // Columns: (useraccount, num, rank).
      val all_df = sparkSession.sql("SELECT LOWER(t.useraccount),t.num, Row_Number() OVER (ORDER BY  CAST(t.num AS int) desc) rank FROM(select useraccount,count(distinct useraccount,faqid,created) as num from l0_faq_all WHERE useraccount is not null AND useraccount!='' AND LOWER(useraccount)!='null' group by useraccount order by  CAST(num AS int) desc) t join ccsd.ad_user_upp ad on LOWER(t.useraccount) = LOWER(ad.user_name)")
      val all_num = all_df.rdd.count()

      // Bucket ratio "low-medium-high", e.g. "2-3-5". Parse once on the driver
      // and precompute the rank cut-offs instead of re-deriving them per row
      // inside the map closure (the original recomputed the sums/divisions for
      // every record).
      val l0_conf = DBHelper.getL0Conf.split("-")
      val lowWeight = l0_conf(0).toDouble
      val mediumWeight = l0_conf(1).toDouble
      val highWeight = l0_conf(2).toDouble
      val totalWeight = lowWeight + mediumWeight + highWeight
      // Ranks 1..highCut are "High"; (highCut, mediumCut] are "Medium"; rest "Low".
      val highCut = (all_num * (highWeight / totalWeight)).round
      val mediumCut = (all_num * ((highWeight + mediumWeight) / totalWeight)).round

      all_df.rdd.filter(row => row(0) != null && row(0) != "").map { row =>
        val put = new Put(Bytes.toBytes(row(0) + ""))
        // row(2) is the window-function rank; stringify-then-parse handles
        // whichever integral type Spark surfaces it as.
        val rank = (row(2) + "").toInt
        val bucket =
          if (rank <= highCut) "High"
          else if (rank <= mediumCut) "Medium"
          else "Low"
        put.addColumn(Bytes.toBytes("l0_document_property"), Bytes.toBytes("reading_times"), Bytes.toBytes(bucket))
        (new ImmutableBytesWritable, put)
      }.saveAsHadoopDataset(jobConf)

      // doc_type: only rows whose subcategory carries a "--" separated hierarchy.
      val l0_type = sparkSession.sql("SELECT t.useraccount,t.subcategory FROM l0_faq_all t join ccsd.ad_user_upp ad on LOWER(t.useraccount) = LOWER(ad.user_name) where t.subcategory like '%--%' ")

      import sparkSession.sqlContext.implicits._
      // Count views per (user, top-level subcategory). The "#" join key mirrors
      // the original encoding; split("#")(1) therefore truncates any subcategory
      // that itself contains "#", same as before.
      l0_type.rdd.map { item =>
        (item(0) + "#" + item(1).toString.split("--")(0), 1)
      }.reduceByKey(_ + _).map { item =>
        (item._1.split("#")(0), item._1.split("#")(1), item._2)
      }.toDF("useraccount", "subcategory", "num").createTempView("l0_type_tmp")

      // Top-1 subcategory per user, left-joined with the top-2 count so ties
      // can be detected (b.num is null when the user has a single subcategory).
      val l0_type_df = sparkSession.sql("SELECT a.useraccount,a.subcategory,a.num,b.num FROM (select t.useraccount useraccount,t.subcategory subcategory,t.num num from (select useraccount,subcategory,num,row_number() over( partition by useraccount order by CAST(num AS int) desc ) rn  from l0_type_tmp) t where t.rn =1) a left join (select t.useraccount useraccount,t.subcategory subcategory,t.num num from  (select useraccount,subcategory,num,row_number() over( partition by useraccount order by CAST(num AS int) desc ) rn  from l0_type_tmp) t where t.rn =2) b  on (a.useraccount = b.useraccount)")
      l0_type_df.rdd.map { row =>
        val put = new Put(Bytes.toBytes(row(0) + ""))
        // Tie between the top two counts -> no dominant type -> "Average".
        // (The empty/null guards are kept from the original; counts from
        // reduceByKey should never be null in practice.)
        if ((row(2) + "") == (row(3) + "") && !("").equals(row(2) + "") && row(2) != null)
          put.addColumn(Bytes.toBytes("l0_document_property"), Bytes.toBytes("doc_type"), Bytes.toBytes("Average"))
        else
          put.addColumn(Bytes.toBytes("l0_document_property"), Bytes.toBytes("doc_type"), Bytes.toBytes(row(1) + ""))
        (new ImmutableBytesWritable, put)
      }.saveAsHadoopDataset(jobConf)
    } finally {
      // Always release the Spark session / YARN resources, even on failure
      // (the original skipped stop() whenever an earlier stage threw).
      sparkSession.stop()
    }
  }
}