from pyspark.sql import SparkSession
from pyspark.sql.functions import col, when, lit

# DateTime value class: pairs a calendar month with its day count
class DateTime:
    """Value pair holding a calendar month number and that month's day count."""

    def __init__(self, month, days):
        # month: 1-12; days: number of days in that month
        self.month, self.days = month, days

def lag_feture(df, lags, col):
    """Append lagged copies of column *col* as new features.

    For each offset ``i`` in *lags*, the (date_block_num, entreport_id,
    item_id, col) slice of *df* is shifted forward by ``i`` months and
    left-joined back as a new column named ``{col}_lag_{i}``.

    BUG FIX: the parameter ``col`` shadows ``pyspark.sql.functions.col``,
    so the original ``col("date_block_num")`` invoked a *string* and raised
    ``TypeError: 'str' object is not callable``. Fixed by indexing the
    DataFrame directly, which keeps the public signature unchanged.

    :param df: DataFrame containing the three key columns and *col*
    :param lags: iterable of integer month offsets (e.g. [1, 2, 3, 6, 12])
    :param col: name of the column to lag
    :return: *df* extended with one ``{col}_lag_{i}`` column per lag
    """
    result = df
    for i in lags:
        tmp = df.select("date_block_num", "entreport_id", "item_id", col)
        tmp = tmp.withColumnRenamed(col, f"{col}_lag_{i}")
        # Shift the month index so month m's value lines up with month m + i,
        # turning it into a lag feature for the later month.
        tmp = tmp.withColumn("date_block_num", tmp["date_block_num"] + i)
        result = result.join(tmp, ["date_block_num", "entreport_id", "item_id"], "left")
    return result

if __name__ == "__main__":
    # Run all HDFS access as the root user.
    import os
    os.environ["HADOOP_USER_NAME"] = "root"

    # Build a local SparkSession for the feature-engineering job.
    builder = SparkSession.builder
    builder = builder.appName("lightgbm")
    builder = builder.master("local[*]")
    spark = builder.getOrCreate()
    spark.sparkContext.setLogLevel("WARN")

    # Load the raw CSV inputs from HDFS.
    path = "hdfs://hadoop102:8020/data"

    def _read_csv(filename):
        """Read one CSV under *path* with a header row and an inferred schema.

        Factored out of four byte-identical reader chains; options are
        unchanged from the original code.
        """
        return spark.read.option("header", "true") \
            .option("inferSchema", "true") \
            .csv(path + "/" + filename)

    salesTrain = _read_csv("sales_train.csv")
    items = _read_csv("new_items.csv")
    entreports = _read_csv("entreports.csv")
    itemCategoriesDF = _read_csv("item_categories.csv")

    # Split the warehouse name column into (city, name, id).
    # NOTE(review): assumes entreport_name always contains at least two
    # space-separated tokens — confirm against the source data.
    def _split_entreport(row):
        parts = row["entreport_name"].split(" ")
        return parts[0], parts[1], row["id"]

    entreportsDF = entreports.rdd.map(_split_entreport) \
        .toDF(["entreport_city", "entreport_name", "entreport_id"])

    # Feature extraction.
    # Target variable: total items moved per (month, warehouse, item).
    item_cnt_month = salesTrain.select("date_block_num", "entreport_id", "item_id", "item_cnt_day") \
        .groupBy("date_block_num", "entreport_id", "item_id") \
        .sum("item_cnt_day").withColumnRenamed("sum(item_cnt_day)", "item_cnt_month")

    # Denormalize: attach target plus item, category and warehouse attributes.
    mergeData = salesTrain \
        .join(item_cnt_month, ["date_block_num", "entreport_id", "item_id"], "left") \
        .join(items, ["item_id"], "left") \
        .join(itemCategoriesDF, ["item_category_id"], "left") \
        .join(entreportsDF, ["entreport_id"], "left")

    def _mean_feature(group_cols, value_col, out_name):
        """Average *value_col* of mergeData over *group_cols*, renamed to *out_name*."""
        return mergeData.groupBy(*group_cols) \
            .mean(value_col) \
            .withColumnRenamed(f"avg({value_col})", out_name)

    date_avg_item_cnt = _mean_feature(["date_block_num"], "item_cnt_month", "date_avg_item_cnt")
    date_item_avg_item_cnt = _mean_feature(["date_block_num", "item_id"], "item_cnt_month", "date_item_avg_item_cnt")
    date_entreport_avg_item_cnt = _mean_feature(["date_block_num", "entreport_id"], "item_cnt_month", "date_entreport_avg_item_cnt")
    date_cat_avg_item_cnt = _mean_feature(["date_block_num", "item_category_id"], "item_cnt_month", "date_cat_avg_item_cnt")
    date_cat_entreport_avg_item_cnt = _mean_feature(["date_block_num", "item_category_id", "entreport_id"], "item_cnt_month", "date_cat_entreport_avg_item_cnt")
    date_type_avg_item_cnt = _mean_feature(["date_block_num", "type"], "item_cnt_month", "date_type_avg_item_cnt")
    date_item_type_avg_item_cnt = _mean_feature(["date_block_num", "item_id", "type"], "item_cnt_month", "date_item_type_avg_item_cnt")
    date_city_avg_item_cnt = _mean_feature(["date_block_num", "entreport_city"], "item_cnt_month", "date_city_avg_item_cnt")
    date_item_city_avg_item_cnt = _mean_feature(["date_block_num", "entreport_city", "item_id"], "item_cnt_month", "date_item_city_avg_item_cnt")

    item_avg_item_price = _mean_feature(["item_id"], "item_price", "item_avg_item_price")
    # BUG FIX: the original renamed this aggregate to "item_avg_item_price"
    # as well, colliding with the column above (duplicate column after the
    # later joins) and leaving no "date_item_avg_item_price" column for the
    # lag step to select.
    date_item_avg_item_price = _mean_feature(["item_id", "date_block_num"], "item_price", "date_item_avg_item_price")

    # Derive the calendar month (1-12) from the running month index.
    mergeData = mergeData.withColumn("month", (col("date_block_num") % 12 + 1).cast("int"))

    # Day count for each calendar month (February fixed at 28 days).
    month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    dateTime = [DateTime(m, d) for m, d in enumerate(month_lengths, start=1)]
    dateDataFrame = spark.createDataFrame([(dt.month, dt.days) for dt in dateTime], ["month", "days"])

    # Feature assembly: left-join the calendar table and every aggregate
    # back onto the denormalized fact table, in the original order.
    join_specs = [
        (dateDataFrame, ["month"]),
        (date_avg_item_cnt, ["date_block_num"]),
        (date_item_avg_item_cnt, ["date_block_num", "item_id"]),
        (date_entreport_avg_item_cnt, ["date_block_num", "entreport_id"]),
        (date_cat_avg_item_cnt, ["date_block_num", "item_category_id"]),
        (date_cat_entreport_avg_item_cnt, ["date_block_num", "item_category_id", "entreport_id"]),
        (date_type_avg_item_cnt, ["date_block_num", "type"]),
        (date_item_type_avg_item_cnt, ["date_block_num", "item_id", "type"]),
        (date_city_avg_item_cnt, ["date_block_num", "entreport_city"]),
        (date_item_city_avg_item_cnt, ["date_block_num", "entreport_city", "item_id"]),
        (item_avg_item_price, ["item_id"]),
        (date_item_avg_item_price, ["item_id", "date_block_num"]),
    ]
    for other, keys in join_specs:
        mergeData = mergeData.join(other, keys, "left")

    # Generate 1/2/3/6/12-month lags for every aggregate feature.
    lag_months = [1, 2, 3, 6, 12]
    lagged_features = [
        "date_avg_item_cnt",
        "date_item_avg_item_cnt",
        "date_entreport_avg_item_cnt",
        "date_cat_avg_item_cnt",
        "date_cat_entreport_avg_item_cnt",
        "date_type_avg_item_cnt",
        "date_item_type_avg_item_cnt",
        "date_city_avg_item_cnt",
        "date_item_city_avg_item_cnt",
        "item_avg_item_price",
        "date_item_avg_item_price",
    ]
    for feature_name in lagged_features:
        mergeData = lag_feture(mergeData, lag_months, feature_name)

    # Drop the raw (un-lagged) aggregate columns; only their lags are kept.
    columns_to_drop = [
        "date_avg_item_cnt",
        "date_item_avg_item_cnt",
        "date_entreport_avg_item_cnt",
        "date_cat_avg_item_cnt",
        "date_cat_entreport_avg_item_cnt",
        "date_type_avg_item_cnt",
        "date_item_type_avg_item_cnt",
        "date_city_avg_item_cnt",
        "date_item_city_avg_item_cnt",
        "item_avg_item_price",
        "date_item_avg_item_price",
    ]
    mergeData = mergeData.drop(*columns_to_drop)

    # Persist the feature table as a single CSV part file.
    # NOTE(review): Spark writes a *directory* named "sample_file.csv"
    # containing the part file — confirm downstream readers expect that.
    mergeData.coalesce(1).write.option("header", "true").csv(path + "/sample_file.csv")

    # Tear down the SparkSession.
    spark.stop()