# -*- coding: UTF-8 -*-
"""
@File    ：综合案例.py
@Author  ：techPang
@Date    ：2025/2/8 17:14 
@Version ：1.0
"""
import json

from pyspark import SparkContext, SparkConf

# Spark setup: run locally on all cores; default parallelism of 1 so each
# action below writes its result into a single partition/file.
conf = (
    SparkConf()
    .setMaster("local[*]")
    .setAppName("spark_test")
    .set("spark.default.parallelism", "1")
)
sc = SparkContext(conf=conf)
# Load the raw search log: one tab-separated record per line.
rdd = sc.textFile("./search_log.txt")
# 热门时间段top3
# Top-3 busiest hours: the timestamp is field 0 of each tab-separated line,
# and the hour is the part before the first ":".
rdd_date = (
    rdd
    .map(lambda line: (line.split("\t")[0].split(":")[0], 1))
    .reduceByKey(lambda a, b: a + b)
    .sortBy(lambda pair: pair[1], ascending=False, numPartitions=1)
    .take(3)
)
print(rdd_date)

# 热门搜索词top3
# Top-3 search keywords: the keyword is field 2 of each record.
rdd_word = (
    rdd
    .map(lambda line: (line.split("\t")[2], 1))
    .reduceByKey(lambda a, b: a + b)
    .sortBy(lambda pair: pair[1], ascending=False, numPartitions=1)
    .take(3)
)
print(rdd_word)

# For the keyword "黑马程序员", find the single timestamp bucket (field 0)
# with the highest number of searches.
rdd_hm = (
    rdd
    .filter(lambda line: line.split("\t")[2] == "黑马程序员")
    .map(lambda line: (line.split("\t")[0], 1))
    .reduceByKey(lambda a, b: a + b)
    .sortBy(lambda pair: pair[1], ascending=False, numPartitions=1)
    .take(1)
)

print(rdd_hm)

# Export every record as one JSON object per line (JSON Lines) under
# ./output_json.
# FIX 1: the "user_id" key previously contained a stray colon ("user_id:").
# FIX 2: the original wrote the dict's Python repr() (single quotes), which
# is not valid JSON despite the output path name — serialize with
# json.dumps; ensure_ascii=False keeps non-ASCII keywords readable.
rdd.map(lambda x: x.split("\t")). \
    map(lambda x: json.dumps(
        {"time": x[0], "user_id": x[1], "key_word": x[2],
         "rank1": x[3], "rank2": x[4], "url": x[5]},
        ensure_ascii=False)). \
    saveAsTextFile("./output_json")

# Release the SparkContext and its resources.
sc.stop()
