package cn.spark.study.sql

import org.apache.spark.SparkContext
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.types.DataTypes
import org.apache.spark.sql.types.LongType
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.types.IntegerType


/**
 * Input line columns (tab-separated): date, user, keyword, city, platform, version.
 * (NOTE(review): the code below reads column 1 as the keyword and column 2 as the
 * user — the opposite of this listing; confirm the actual file layout.)
 *
 * Requirements:
 *   1. Filter rows matching the query conditions (city, platform, version).
 *   2. Compute the top-3 search keywords per day, ranked by UV (unique visitors).
 *   3. Sort by each day's total UV across its top-3 keywords, descending.
 *   4. Save the result into a Hive table.
 */
object DailyTop3KeyWord {

  /** Entry point: obtains a SparkContext from the project's ContextUtil and runs the job. */
  def main(args: Array[String]): Unit = {
    val sc = new ContextUtil().getContext("DailyTop3KeyWord")
    dailyTop3KeyWord(sc)
  }

  /**
   * Computes, for each day, the top-3 search keywords ranked by UV (unique
   * visitors) and saves the result into the Hive table "top3DailyUv_scala".
   *
   * Expected input line format (tab-separated):
   *   field 0 = date, field 1 = keyword, field 2 = user,
   *   field 3 = city, field 4 = platform, field 5 = version
   *
   * NOTE(review): the original requirement list also asks for the days to be
   * sorted by their top-3 UV total (descending); neither the original code nor
   * this version implements that step — confirm whether it is still required.
   *
   * @param sc the SparkContext used to read the raw keyword log
   */
  def dailyTop3KeyWord(sc: SparkContext): Unit = {

    val sqlC = new HiveContext(sc)
    val logs = sc.textFile("hdfs://spark1:9000/spark-study/keyword.txt", 2)

    // Simulated query conditions; a row must match every one of them.
    // Broadcast so each executor holds one read-only copy instead of
    // shipping the map with every task.
    val filterMap = Map(
      "city" -> List("beijing"),
      "platform" -> List("android"),
      "version" -> List("1.0", "1.2", "1.5", "2.0"))
    val broadcast = sc.broadcast(filterMap)

    // Step 1: keep only lines whose city/platform/version pass the filters.
    // Split each line once (the original re-split the same line three times).
    val filterRDD = logs.filter { log =>
      val fields = log.split("\t")
      val filters = broadcast.value
      filters.nonEmpty &&
        filters("city").contains(fields(3)) &&
        filters("platform").contains(fields(4)) &&
        filters("version").contains(fields(5))
    }

    // Step 2: map to (date_keyword, user); users are not yet deduplicated.
    val dateWordUsers = filterRDD.map { filteredLog =>
      val fields = filteredLog.split("\t")
      (fields(0) + "_" + fields(1), fields(2))
    }

    // Step 3: group all users under their (date, keyword) key.
    val groupedDateWordUser = dateWordUsers.groupByKey()

    // Step 4: deduplicate users per key to obtain the UV count, producing
    // Row(date, word, uv). toSet deduplicates in O(n), replacing the
    // original O(n^2) ArrayBuffer.contains scan.
    val distinctUvRDD = groupedDateWordUser.map { case (dateWord, users) =>
      val parts = dateWord.split("_")
      Row(parts(0), parts(1), users.toSet.size)
    }

    // Step 5: register a temp table and use the row_number() window function
    // to keep the top-3 keywords by UV within each day.
    val structType = StructType(Array(
      StructField("date", StringType, true),
      StructField("word", StringType, true),
      StructField("uv", IntegerType, true)))
    val uvRowDF = sqlC.createDataFrame(distinctUvRDD, structType)
    uvRowDF.registerTempTable("UvRowDF")

    val sql =
      "select date,word,uv from (" +
        "select date,word,uv, row_number() over (partition by date order by uv desc) rn from UvRowDF" +
        ") tmp " +
        "where rn <= 3"

    // Step 6: run the query and persist the result into a Hive table.
    val top3DailyUv = sqlC.sql(sql)
    top3DailyUv.saveAsTable("top3DailyUv_scala")

  }
}























