#  网站统计大数据
#  2017-12-27 20:00启动
#  参与人：壮志凌云

from __future__ import print_function
import re
from pyspark.sql import SparkSession
from pyspark.sql.functions import sum,row_number,desc,count,countDistinct,from_unixtime,max,isnull
from pyspark.sql import Window
import json

def refClassIdentify(row):
    """Classify a page-view row by its referrer.

    Returns the row's fields as a dict with an added "refClass" key:
      - "搜索引擎" (search engine)  when the referrer contains a known engine host
      - "直接访问" (direct visit)   when the referrer is empty or missing
      - "外部链接" (external link)  otherwise

    Args:
        row: a pyspark Row (anything exposing ``.asDict()``).

    Returns:
        dict: the row's fields plus the "refClass" key.
    """
    # Known search-engine host fragments; extend this tuple to recognize more engines.
    search_engines = ('baidu', 'google', 'so.com', 'bing.com', 'yahoo.com', 'sogou.com')

    dic = row.asDict()
    # Raw logs may lack "ref" or carry None — the unguarded re.search() here
    # crashed full-data runs; treat both cases as a direct visit.
    ref = dic.get('ref')
    if not ref:
        dic["refClass"] = "直接访问"
    elif any(host in ref for host in search_engines):
        dic["refClass"] = "搜索引擎"
    else:
        dic["refClass"] = "外部链接"
    return dic

if __name__ == "__main__":
    spark = SparkSession \
        .builder \
        .appName("网站统计-来源分析") \
        .getOrCreate()

    # Parse the JSON log ourselves with the stdlib json module (one JSON object
    # per line), then let Spark infer the DataFrame schema.
    jsonData = spark.sparkContext.textFile("main_pv.json")
    WebData = jsonData.map(json.loads).toDF()
    WebData.printSchema()

    # Overview: per-referrer totals, ordered by page views (Top-10 style list).
    OverViewLinkSource = WebData.groupBy(WebData["ref"].alias("来源网站")).agg(
        count("_id").alias("浏览量(PV)"),
        countDistinct("u").alias("访客数(UV)"),
        countDistinct("ip").alias("IP数")
    ).orderBy(desc("浏览量(PV)"))

    # Derive calendar-day and hour-of-day columns from the unix timestamp "vt".
    WebDataWithDH = WebData \
        .withColumn('day', from_unixtime("vt", format='yyyy-MM-dd')) \
        .withColumn('hour', from_unixtime("vt", format='HH'))

    # Referrer classification is encoded as TWO boolean columns rather than one
    # enum column: a single-column rdd.map() approach crashed on full data (see
    # refClassIdentify), so:
    #   refClassS — referrer contains a known search-engine host
    #   refClassD — referrer is empty (direct visit)
    # External links are rows where both flags are false.
    WebDataSource = WebDataWithDH.select(
        WebDataWithDH["day"],
        WebDataWithDH["hour"],
        WebDataWithDH["u"],
        WebDataWithDH["ip"],
        WebDataWithDH["ref"],
        WebDataWithDH["mi"]
    ).withColumn(
        "refClassS",
        WebDataWithDH["ref"].contains('baidu') |
        WebDataWithDH["ref"].contains('google') |
        WebDataWithDH["ref"].contains('bing.com') |
        WebDataWithDH["ref"].contains('so.com') |
        WebDataWithDH["ref"].contains('yahoo.com') |
        # Fixed typo: was 'sougou.com', which never matches the real Sogou host
        # and silently misclassified Sogou traffic as external links.
        WebDataWithDH["ref"].contains('sogou.com')
    ).withColumn(
        "refClassD",
        WebDataWithDH["ref"] == ''
    )

    WebDataSource.printSchema()

    # Report parameters as a dict so individual filter clauses are easy to
    # update independently (empty string = no filter on that dimension).
    AnalysisCondition = {'day': "day = '2017-12-26'", 'hour': '', "u": '',
                         "ip": '', "ref": '', "refClass": '', "mi": ''}
    # Grouping columns for the source-type / source-site breakdowns below.
    refAnalysis = ["refClassS", "refClassD", "ref"]

    # Pre-aggregate a data cube over every reporting dimension; each report
    # below filters/groups this cube instead of rescanning the raw data.
    WebDataSourceCube = WebDataSource.cube(
        WebDataSource["refClassS"],
        WebDataSource["refClassD"],
        WebDataSource["day"],
        WebDataSource["hour"],
        WebDataSource["u"],
        WebDataSource["ip"],
        WebDataSource["mi"],
        WebDataSource["ref"]
    ).count()

    ###################################################
    #  1. All sources
    ###################################################
    WebDataSourceAll = WebDataSourceCube.filter(AnalysisCondition['day']) \
        .groupBy("day", "hour", refAnalysis[0], refAnalysis[1]) \
        .agg(
            max("count").alias("浏览量(PV)"),
            countDistinct("u").alias("访客数(UV)"),
            countDistinct("ip").alias("IP数")
        )

    ###################################################
    #  2. Search engines
    ###################################################
    WebDataSourceSearch = WebDataSourceCube.filter(AnalysisCondition['day']).filter("refClassS = 'true'") \
        .groupBy("day", "hour", refAnalysis[0], refAnalysis[1], refAnalysis[2]) \
        .agg(
            max("count").alias("浏览量(PV)"),
            countDistinct("u").alias("访客数(UV)"),
            countDistinct("ip").alias("IP数")
        )

    ###################################################
    #  3. Search terms
    ###################################################
    # Extracting search keywords from the PV records is still an open problem;
    # this section is intentionally skipped.

    ###################################################
    #  4. External links (neither search engine nor direct visit)
    ###################################################
    WebDataSourceOuterLink = WebDataSourceCube.filter(AnalysisCondition['day']).filter("refClassS = 'false'").filter("refClassD = 'false'") \
        .groupBy("day", "hour", refAnalysis[0], refAnalysis[1], refAnalysis[2]) \
        .agg(
            max("count").alias("浏览量(PV)"),
            countDistinct("u").alias("访客数(UV)"),
            countDistinct("ip").alias("IP数")
        )

    WebDataSourceAll.show()
    WebDataSourceSearch.show()
    WebDataSourceOuterLink.show()

    spark.stop()
