# -*- coding:utf-8 -*-
from pyspark.sql import SparkSession

from config import db_schema, write_table, append_mode, properties
from mergesort import merge_sort

# Read a Hive table, compute the per-seller average price, sort the collected
# rows with the project's local merge sort, and persist the result to a JDBC
# database table.
spark = SparkSession.builder.appName("readHiveThenStore").enableHiveSupport().getOrCreate()

# Load the source table (the Hive table holds ~10,000 rows).
kylin_sales = spark.sql("select * from default.kylin_sales")

kylin_sales.show(100)

# Group by seller_id.
# NOTE(review): the original comment claimed grouping by ops_region, but the
# code has always grouped by seller_id — the comment was wrong, not the code.
grouped_data = kylin_sales.groupBy("seller_id")

# Average price per seller.
agv = grouped_data.avg('price')

# Rename columns for the target table: seller_id -> ID, avg(price) -> NAME.
# NOTE(review): storing the averaged price under a column named "NAME" looks
# suspicious — confirm the target table schema really expects this mapping.
new_dataFrame = agv.select("avg(price)", "seller_id").withColumnRenamed("seller_id", "ID").withColumnRenamed("avg(price)","NAME")
collect = new_dataFrame.collect()

# Rows before sorting.
for row in collect:
    print(row)

# Sort locally with the project's merge sort.
# NOTE(review): this pulls every row to the driver; for large tables prefer
# DataFrame.orderBy so the sort runs distributed on the cluster.
sort_result = merge_sort(collect)
for row in sort_result:
    print(row)

# Persist the sorted rows to the database via JDBC.
spark.createDataFrame(sort_result).write.jdbc(db_schema, write_table, append_mode, properties)

# Release cluster resources held by the session (was missing in the original).
spark.stop()
