package com.spark.core.examples

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable

object PvAndUv {

  /**
   * Computes PV (page views), UV (unique visitors per URL), and the most
   * active regions per site from a tab-separated access log.
   *
   * Expected record layout (tab-separated):
   *   column 0 = client IP, column 1 = region, column 5 = URL.
   * NOTE(review): layout inferred from the indices used below — confirm
   * against the actual ./data/pvuvdata file.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("pvuv")
    val sc = new SparkContext(conf)
    sc.setLogLevel("error")
    try {
      val lines = sc.textFile("./data/pvuvdata")

      // PV: count how many times each URL occurs.
      val pvRst: RDD[(String, Int)] = lines.map { line =>
        (line.split("\t")(5), 1)
      }.reduceByKey(_ + _)
      // pvRst.sortBy(_._2, false).foreach(println)

      // UV: count distinct IPs per URL.
      // Deduplicate on an (ip, url) tuple rather than joining the two
      // fields with "-" and re-splitting: URLs frequently contain '-',
      // which made split("-")(1) recover only a fragment of the URL.
      // Splitting the line once also avoids the duplicated split() call.
      val distinctRDD: RDD[(String, String)] = lines.map { data =>
        val arr: Array[String] = data.split("\t")
        (arr(0), arr(5)) // (ip, url)
      }.distinct()
      // distinctRDD.foreach(println)

      distinctRDD
        .map { case (_, url) => (url, 1) }
        .reduceByKey(_ + _)
        .sortBy(_._2, false)
        .foreach(println)

      println("uv count end....")

      // Per site: regions ranked by hit count, most active first.
      // Pre-aggregate with reduceByKey on ((site, area), 1) instead of
      // groupByKey + mutable HashMap: the counting happens map-side, so
      // the shuffle carries counts rather than every raw area value, and
      // no per-key collection has to fit in memory.
      val siteAreaRDD: RDD[(String, List[(String, Int)])] = lines
        .map { data =>
          val arr: Array[String] = data.split("\t")
          ((arr(5), arr(1)), 1) // ((site, area), 1)
        }
        .reduceByKey(_ + _)
        .map { case ((site, area), cnt) => (site, (area, cnt)) }
        .groupByKey()
        .mapValues(_.toList.sortBy(-_._2))
      siteAreaRDD.foreach(println)

    } finally {
      // Release the SparkContext even if a job above fails;
      // the original leaked it on every run.
      sc.stop()
    }
  }

}
