# Search-log analysis with PySpark (local mode).
from pyspark import SparkConf,SparkContext
import os
# Point PySpark workers at the local Python interpreter (Windows paths).
os.environ["PYSPARK_PYTHON"] = "D:/Soft/Python/Python310/python.exe"
# winutils/Hadoop binaries needed by Spark on Windows.
os.environ["HADOOP_HOME"]="D:/Soft/Python/hadoop-3.0.0"

if __name__ == '__main__':
    # Analyze a tab-separated search log: top search hours, top keywords,
    # and the peak hour for one specific keyword.
    conf = SparkConf().setMaster("local[*]").setAppName("test")
    # Force a single partition so the sorted output order is deterministic;
    # could also be set per-RDD at creation time.
    conf.set("spark.default.parallelism", "1")
    sc = SparkContext(conf=conf)

    # Each line is tab-separated; field 0 starts with a timestamp whose first
    # two characters are the hour, field 2 is the search keyword.
    file_rdd = sc.textFile("data/search_log.txt")
    # The same RDD feeds three independent jobs below — cache it so the file
    # is read and parsed only once instead of three times.
    file_rdd.cache()

    def _top3_by(key_fn):
        """Return the 3 most frequent key_fn(line) values as (key, count) pairs."""
        return file_rdd.map(lambda line: (key_fn(line), 1)) \
            .reduceByKey(lambda a, b: a + b) \
            .sortBy(lambda kv: kv[1], ascending=False, numPartitions=1) \
            .take(3)

    # Top 3 busiest search time slots (hour precision).
    result1 = _top3_by(lambda line: line.split("\t")[0][:2])
    print(result1)

    # Top 3 search keywords.
    result2 = _top3_by(lambda line: line.split("\t")[2])
    print(result2)

    # Hour in which the keyword "黑马程序员" was searched most often.
    result3 = file_rdd.map(lambda line: line.split("\t")) \
        .filter(lambda fields: fields[2] == "黑马程序员") \
        .map(lambda fields: (fields[0][:2], 1)) \
        .reduceByKey(lambda a, b: a + b) \
        .reduce(lambda a, b: a if a[1] > b[1] else b)
    print(result3)

    sc.stop()