"""
使用spark知识进行综合案例练习
"""
import json

from pyspark import SparkConf, SparkContext

# Build the Spark configuration: run locally on every core, name the app,
# and pin the global default parallelism to a single partition so the
# later sortBy/take results come out in one deterministic ordering.
conf = SparkConf() \
    .setMaster("local[*]") \
    .setAppName("test_spark_app") \
    .set("spark.default.parallelism", "1")
sc = SparkContext(conf=conf)

# Load the search log; each RDD element is one raw line of the file.
file_path = "../my_file/search_log.txt"
file_rdd = sc.textFile(file_path)
# print(file_rdd.collect())

# -------------- Requirement 1: top-3 busiest search hours --------------
# Each line looks like "HH:MM:SS\tuser_id\tkeyword\trank1\trank2\turl".
# NOTE: textFile() already yields one line per RDD element, so the
# original flatMap(lambda line: line.split("\n")) was a no-op extra pass
# over the data and has been removed.
# 1.1 extract the hour (field before the first ':') from every line
hour_rdd = file_rdd.map(lambda line: line.split("\t")[0].split(":")[0])
# 1.2 map each hour to a (hour, 1) pair
tuple_hour_count_rdd = hour_rdd.map(lambda hour: (hour, 1))
# 1.3 sum the counts per hour
key_group_rdd = tuple_hour_count_rdd.reduceByKey(lambda a, b: a + b)
# 1.4 sort by count, descending (single partition -> one global order)
hour_sort_rdd = key_group_rdd.sortBy(lambda x: x[1], ascending=False, numPartitions=1)
# 1.5 take the top 3 (hour, count) pairs
before_third_hour_list: list = hour_sort_rdd.take(3)
print(f"需求1结果为:{before_third_hour_list}")
# ----------------------- Requirement 1, chained form -----------------------
# Same computation as above in one pipeline; "HH" is taken via [:2],
# equivalent to split(":")[0] for the fixed-width HH:MM:SS timestamps.
result_list_1_chain = file_rdd.map(lambda x: (x.split("\t")[0][:2], 1)). \
    reduceByKey(lambda a, b: a + b). \
    sortBy(lambda x: x[1], ascending=False, numPartitions=1). \
    take(3)
print(f"需求1链式实现的结果为:{result_list_1_chain}")

# ----------------- Requirement 2: top-3 search keywords -----------------
# NOTE: the redundant flatMap(lambda line: line.split("\n")) from the
# original was removed — textFile() already yields individual lines.
# 2.1 extract the keyword (3rd tab-separated field) from every line
search_word_rdd = file_rdd.map(lambda data: data.split("\t")[2])
# 2.2 map each keyword to a (keyword, 1) pair
search_word_tuple_rdd = search_word_rdd.map(lambda word: (word, 1))
# 2.3 sum the counts per keyword
reduce_by_key_rdd = search_word_tuple_rdd.reduceByKey(lambda a, b: a + b)
# 2.4 sort by count, descending
sort_by_rdd = reduce_by_key_rdd.sortBy(lambda x: x[1], ascending=False, numPartitions=1)
# 2.5 keep only the keyword (drop the count) and take the top 3
result_list_2: list = sort_by_rdd.map(lambda x: x[0]).take(3)
print(f"需求2的结果为：{result_list_2}")
# ----------------- Requirement 2, chained form -----------------
# Same pipeline in one expression; intentionally returns (keyword, count)
# pairs rather than bare keywords like result_list_2 above.
result_list_2_chain = file_rdd.map(lambda x: (x.split("\t")[2], 1)). \
    reduceByKey(lambda a, b: a + b). \
    sortBy(lambda x: x[1], ascending=False, numPartitions=1). \
    take(3)
print(f"需求2链式实现结果为:{result_list_2_chain}")
# ------ Requirement 3: hours in which "黑马程序员" was searched the most ------
# NOTE: the redundant flatMap(lambda line: line.split("\n")) was removed
# (textFile() already yields lines), and the intermediate RDD variables
# were renamed so they no longer shadow the identically-named variables
# from requirements 1 and 2.
# 3.1 keep only the lines whose keyword field is exactly "黑马程序员"
keyword_rdd = file_rdd.filter(lambda data: data.split("\t")[2] == "黑马程序员")
# 3.2 extract the hour from the timestamp of each matching line
keyword_hour_rdd = keyword_rdd.map(lambda word: word.split("\t")[0].split(":")[0])
# 3.3 map each hour to a (hour, 1) pair
keyword_hour_count_rdd = keyword_hour_rdd.map(lambda hour_data: (hour_data, 1))
# 3.4 sum the counts per hour
keyword_hour_sum_rdd = keyword_hour_count_rdd.reduceByKey(lambda a, b: a + b)
# 3.5 sort by count, descending
keyword_hour_sorted_rdd = keyword_hour_sum_rdd.sortBy(lambda x: x[1], ascending=False, numPartitions=1)
# 3.6 take the top 3 (hour, count) pairs
result_list_3 = keyword_hour_sorted_rdd.take(3)
print(f"需求3的结果为：{result_list_3}")
# ----------------- Requirement 3, chained form -----------------
result_list_3_chain = file_rdd.map(lambda x: x.split("\t")). \
    filter(lambda x: x[2] == '黑马程序员'). \
    map(lambda h: (h[0][:2], 1)). \
    reduceByKey(lambda a, b: a + b). \
    sortBy(lambda s: s[1], ascending=False, numPartitions=1). \
    take(3)
print(f"需求3链式实现的结果为：{result_list_3_chain}")
# ------ Requirement 4: convert each record to JSON and write to files ------
# BUG FIX: the original saved the repr() of a Python dict, which uses
# single quotes and is NOT valid JSON. json.dumps with ensure_ascii=False
# emits proper JSON while keeping the Chinese keywords readable.
# (`import json` added at the top of the file.)
output_path = "../my_file/output4"
file_rdd.map(lambda line: line.split("\t")). \
    map(lambda x: json.dumps(
        {"time": x[0], "userId": x[1], "key_word": x[2],
         "rank1": x[3], "rank2": x[4], "url": x[5]},
        ensure_ascii=False)). \
    saveAsTextFile(output_path)