import kafka.serializer.StringDecoder
import org.apache.hadoop.hbase.client.Table
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

object SparkStream {

  /**
   * Streams access-log lines from Kafka, classifies each line into course /
   * referrer-URL / client-IP records, aggregates per-batch counts keyed by
   * year+id, and increments the matching counters in HBase.
   *
   * Expected log line format (space-separated):
   * 2019-02-11 19:38:27 POST /course/vip/111 HTTP/1.1 10.172.120.196 www.baidu.com
   */
  def main(args: Array[String]): Unit = {
    // NOTE(review): replaced `extends App` with an explicit main method. The
    // App trait initializes object fields via delayedInit, a known source of
    // NullPointerExceptions when Spark serializes closures that capture them.
    val conf = new SparkConf().setAppName("Ops1").setMaster("local[2]")
    val ssc = new StreamingContext(conf, Milliseconds(1000))
    val kafkaParams = Map("metadata.broker.list" -> "mini:9092")
    val topics = Set("hadoop")
    val inputDS: InputDStream[(String, String)] =
      KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)

    // Split each Kafka message value (the log line) on single spaces.
    val splits: DStream[Array[String]] = inputDS.map(_._2.split(" "))

    // Wrap the raw fields in typed records.
    val courseObj: DStream[Course] = splits.map { fields =>
      val year = Utils.converDateFormat("yyyy-MM-dd", "yyyyMMdd", fields(0)) // e.g. 20190211
      val time = fields(1)                                                   // e.g. 19:38:27
      val course = fields(3)                                                 // e.g. /course/vip/111
      val courseId = course.substring(course.lastIndexOf("/") + 1)           // e.g. 111
      // Classify by URL path; anything neither "free" nor "company" counts as "vip".
      val courseType =
        if (course.contains("free")) "free"
        else if (course.contains("company")) "company"
        else "vip"
      Course(year, time, courseId, courseType)
    }

    val urlObj: DStream[Url] = splits.map { fields =>
      val year = Utils.converDateFormat("yyyy-MM-dd", "yyyyMMdd", fields(0))
      val time = fields(1)
      val url = fields(6)         // e.g. www.baidu.com
      val ref = url.split('.')(1) // second dotted segment, e.g. "baidu"
      Url(year, time, ref)
    }

    val ipObj: DStream[Ip] = splits.map { fields =>
      val year = Utils.converDateFormat("yyyy-MM-dd", "yyyyMMdd", fields(0))
      val time = fields(1)
      val ip = fields(5)          // e.g. 10.172.120.196
      Ip(year, time, ip)
    }

    // Key by year+id and count occurrences within each micro-batch.
    // 1L makes the Long element type explicit instead of relying on widening.
    val vipCount: DStream[(String, Long)] =
      courseObj.filter(_.types == "vip").map(c => (c.year + c.id, 1L)).reduceByKey(_ + _)
    val freeCount: DStream[(String, Long)] =
      courseObj.filter(_.types == "free").map(c => (c.year + c.id, 1L)).reduceByKey(_ + _)
    val companyCount: DStream[(String, Long)] =
      courseObj.filter(_.types == "company").map(c => (c.year + c.id, 1L)).reduceByKey(_ + _)
    val urlCount: DStream[(String, Long)] =
      urlObj.map(u => (u.year + u.url, 1L)).reduceByKey(_ + _)
    val ipCount: DStream[(String, Long)] =
      ipObj.map(i => (i.year + i.ip, 1L)).reduceByKey(_ + _)

    // Persist each aggregated stream to its HBase table.
    // rowkey = year+id, column family "f1", column "count", value = count.
    saveCounts(vipCount, "vip")
    saveCounts(freeCount, "free")
    saveCounts(companyCount, "company")
    saveCounts(urlCount, "url")
    saveCounts(ipCount, "ip")

    freeCount.print()
    ssc.start()
    ssc.awaitTermination()
  }

  /** Atomically adds `count` to the f1:count counter stored at `rowKey`. */
  private def putData(table: Table, rowKey: String, count: Long): Unit =
    // BUG FIX: the original passed Bytes.toLong(Bytes.toBytes(count)), a
    // redundant encode/decode round-trip; incrementColumnValue takes a Long.
    table.incrementColumnValue(Bytes.toBytes(rowKey), Bytes.toBytes("f1"), Bytes.toBytes("count"), count)

  /**
   * Writes the per-key counts of one stream into the named HBase table,
   * opening one table handle per partition (handles are not serializable,
   * so they must be created inside foreachPartition).
   */
  private def saveCounts(counts: DStream[(String, Long)], tableName: String): Unit =
    counts.foreachRDD { rdd =>
      rdd.foreachPartition { partition =>
        val table = Utils.getTable(tableName)
        try partition.foreach { case (rowKey, count) => putData(table, rowKey, count) }
        finally table.close() // close even if a put fails, to avoid leaking connections
      }
    }
}
