# -*-coding:utf-8 -*-
# Note: the pyspark and py4j packages were placed directly in the Python directory,
# so they are importable without any extra IDE/project configuration.
# Data source file path: file:///E:/PyCharm/PythonFolder/python_homework/export_file/汤学希+201827010320+数据源文件.txt

import os
from pyspark.sql import SparkSession
from pyspark.sql.types import Row

spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
# Load the raw data file. A local-file URI needs the scheme and colon
# ("file:///"); the original "file////" has no scheme, so Hadoop would not
# resolve it as a local path as intended.
loan_info = sc.textFile(
    "file:///E:/PyCharm/PythonFolder/python_homework/export_file/汤学希+201827010320+数据源文件.txt"
)
# Split each line on ';' into its fields (bookname, author, type — see f() below).
loan_info_list = loan_info.map(lambda x: x.split(';'))


# 将数据组装
# Assemble one split record into a dict keyed by column name,
# used to build a Row for the DataFrame schema.
def f(x):
    """Map a 3-field record [bookname, author, type] to a column dict."""
    return {"bookname": x[0], "author": x[1], "type": x[2]}


# 调用外部函数创建模式
# Build a DataFrame from Row objects created via the external function f.
loan_info_df = loan_info_list.map(lambda x: Row(**f(x))).toDF()
# A temp view is required so the data can be queried with Spark SQL.
loan_info_df.createOrReplaceTempView("ProInfo")
rows = spark.sql("select * from ProInfo")
# Count books per type: select the "type" column by name (robust against
# column-order differences between Spark versions), pair each with 1,
# reduce by key, then sort descending by count.
type_counts = (
    rows.rdd.map(lambda x: x["type"])
    .map(lambda t: (t, 1))
    .reduceByKey(lambda a, b: a + b)
)
ranking = type_counts.sortBy(lambda x: x[1], ascending=False).collect()

# Write the top-10 ranking to the result file. `with` guarantees the file
# handle is closed even if a write raises (the original leaked it on error).
out_path = os.getcwd() + '/python_homework/export_file/汤学希+201827010320+排行榜结果.txt'
# NOTE(review): mode='a' appends a new ranking on every run; confirm
# whether overwrite ('w') was actually intended.
with open(out_path, mode='a', encoding='utf-8') as output:
    output.write("起点中文网男生热门读物类型数量前十名排行榜" + '\n')
    # Slice to the top 10 and number ranks from 1 instead of a manual counter.
    for rank, (book_type, count) in enumerate(ranking[:10], start=1):
        output.write('第' + str(rank) + '名  ' + str(book_type) + '类  共' + str(count) + '本\n')
print("排行榜已输出")
