from pyspark import SparkContext
from pyspark.sql import SparkSession
import json
import os
from pyspark.sql.functions import col

# Point PySpark workers at a specific interpreter.
# NOTE(review): machine-specific absolute path — presumably this script only
# runs on one Windows box; consider making it configurable (env var / config).
os.environ['PYSPARK_PYTHON'] = r'D:\Anaconda\envs\pyspark\python.exe'


# Row counts of the most frequent class names.
def topClassName(sc, spark, df):
    """Write the six most frequent class names (with row counts) to JSON.

    Fix: the original ``take(6)`` without any ordering returned an arbitrary
    six groups; the function's name implies the *top* classes, so the counts
    are now ordered descending before taking six.

    Writes: static/data/top-class-name.json (list of [class_name, count] rows).
    """
    top_rows = df.groupBy('class_name').count() \
        .orderBy(col('count').desc()) \
        .take(6)
    # 'with' guarantees the handle is closed even if json.dumps raises.
    with open('static/data/top-class-name.json', 'w', encoding="utf-8") as f:
        f.write(json.dumps(top_rows, ensure_ascii=False))


# Top 10 item ids by total sales weight.
def topIdSales(sc, spark, df):
    """Sum sales_volume per item_id and write the 10 largest totals to JSON.

    ``takeOrdered`` returns the top 10 directly, avoiding the full RDD sort
    that ``sortBy(...).take(10)`` performed.

    Writes: static/data/top-id-sales.json (list of [item_id, total] pairs).
    """
    top_items = df.select('item_id', 'sales_volume').rdd \
        .map(lambda v: (v.item_id, float(v.sales_volume))) \
        .reduceByKey(lambda x, y: x + y) \
        .takeOrdered(10, key=lambda kv: -kv[1])

    # 'with' replaces the manual open/close so the file is always released.
    with open('static/data/top-id-sales.json', 'w') as f:
        f.write(json.dumps(top_items))


# Top 10 item names by total sales weight.
def topNameSales(sc, spark, df):
    """Sum sales_volume per item_name and write the 10 largest totals to JSON.

    Fix: the file is written with ``ensure_ascii=False`` (raw non-ASCII item
    names) but was opened without an explicit encoding — on platforms whose
    default codec cannot encode those names this raised UnicodeEncodeError.
    Now opened with encoding='utf-8'.

    Writes: static/data/top-name-sales.json (list of [item_name, total] pairs).
    """
    top_items = df.select('item_name', 'sales_volume').rdd \
        .map(lambda v: (v.item_name, float(v.sales_volume))) \
        .reduceByKey(lambda x, y: x + y) \
        .takeOrdered(10, key=lambda kv: -kv[1])

    with open('static/data/top-name-sales.json', 'w', encoding='utf-8') as f:
        f.write(json.dumps(top_items, ensure_ascii=False))


# Historical mean unit price per item id.
def itemPriceDistribution(sc, spark, df):
    """Compute the mean sales_unit_price for each item_id and dump to JSON.

    Spark names the aggregate column 'avg(sales_unit_price)', hence the
    lookup key below.

    Writes: static/data/id-mean-price.json (list of [item_id, mean_price]).
    """
    rows = df.select('item_id', 'sales_unit_price').groupBy('item_id').agg(
        {'sales_unit_price': 'mean'}).collect()

    # Comprehension replaces the append loop; same [id, mean] pair shape.
    result = [[row['item_id'], row['avg(sales_unit_price)']] for row in rows]

    # encoding='utf-8' added: output is written with ensure_ascii=False, so
    # relying on the platform default encoding is fragile.
    with open('static/data/id-mean-price.json', 'w', encoding='utf-8') as f:
        json.dump(result, f, ensure_ascii=False)


# Row counts per discounted/non-discounted flag.
def countDiscounted(sc, spark, df):
    """Count rows per whether_discounted value and write the counts to JSON.

    Fixes: 'with' replaces the manual open/close, and encoding='utf-8' is
    made explicit since the dump uses ensure_ascii=False.

    Writes: static/data/whether-discounted.json (list of [flag, count] rows).
    """
    discounted_counts = df.groupBy('whether_discounted').count().collect()

    with open('static/data/whether-discounted.json', 'w', encoding='utf-8') as f:
        f.write(json.dumps(discounted_counts, ensure_ascii=False))


# Distinct unit prices observed for each item name.
def itemPriceVariation(sc, spark, df):
    """Collect the distinct sales_unit_price values seen per item_name.

    Fixes:
    - prices are sorted instead of ``list(set(...))`` so the output file is
      deterministic across runs (set iteration order is not guaranteed);
    - encoding='utf-8' added, since item names are dumped with
      ensure_ascii=False and the platform default codec may not handle them.

    Writes: static/data/name-unit-price.json ({item_name: [price, ...]}).
    """
    price_variation = df.select('item_name', 'sales_unit_price').rdd \
        .map(lambda v: (v.item_name, {float(v.sales_unit_price)})) \
        .reduceByKey(lambda x, y: x | y) \
        .mapValues(sorted) \
        .collectAsMap()

    with open('static/data/name-unit-price.json', 'w', encoding='utf-8') as f:
        f.write(json.dumps(price_variation, ensure_ascii=False, indent=2))


def countSalesByHour(sc, spark, df):
    """Tally rows by the hour of scanning_time and dump the counts to JSON.

    The hour is taken as the first two characters of the scanning_time
    string; results are sorted by that hour key.
    """
    per_hour = df.rdd \
        .map(lambda row: (row.scanning_time[:2], 1)) \
        .reduceByKey(lambda a, b: a + b) \
        .sortByKey() \
        .collect()

    with open('static/data/scanning-time.json', 'w') as f:
        json.dump(per_hour, f, indent=2)


def codeNameSales(sc, spark, df):
    """Total sales_volume per (class_code, item_name), nested per class.

    Output shape: {class_code: {item_name: total_volume}}.

    Fix: encoding='utf-8' added — the dump uses ensure_ascii=False on
    non-ASCII item names, so the platform default encoding could raise
    UnicodeEncodeError.

    Writes: static/data/code-name.json.
    """
    sums = df.select('class_code', 'item_name',
                     col('sales_volume').cast('float')) \
        .groupBy('class_code', 'item_name') \
        .sum('sales_volume')

    # Fold each class's (name, total) rows into one dict per class_code.
    nested = sums.rdd \
        .map(lambda r: (r[0], {r[1]: r[2]})) \
        .reduceByKey(lambda a, b: {**a, **b}) \
        .collectAsMap()

    with open('static/data/code-name.json', 'w', encoding='utf-8') as f:
        f.write(json.dumps(nested, ensure_ascii=False, indent=4))


if __name__ == "__main__":
    sc = SparkContext('local', 'test')
    sc.setLogLevel("WARN")
    spark = SparkSession.builder.getOrCreate()
    file = "data.csv"
    df = spark.read.csv(file, header=True)  # dataframe

    itemPriceVariation(sc, spark, df)
    countDiscounted(sc, spark, df)
    topNameSales(sc, spark, df)
    topClassName(sc, spark, df)
    topIdSales(sc, spark, df)
    itemPriceDistribution(sc, spark, df)
    countSalesByHour(sc, spark, df)
    codeNameSales(sc, spark, df)
