from pyspark.sql import SparkSession
from pyspark.ml.feature import VectorAssembler
from pyspark.ml import Pipeline
from pyspark.ml.evaluation import RegressionEvaluator
import lightgbm as lgb

if __name__ == "__main__":
    # Run as the "root" Hadoop user so Spark can read the HDFS input path.
    import os
    os.environ["HADOOP_USER_NAME"] = "root"

    # Create a local SparkSession.
    spark = SparkSession.builder \
        .appName("test-lightgbm") \
        .master("local[*]") \
        .getOrCreate()
    spark.sparkContext.setLogLevel("WARN")

    hdfsPath = "hdfs://lgns/data"
    path = hdfsPath + "/sample_file.csv"
    originalDataSimple = spark.read.option("header", "true") \
        .option("inferSchema", "true") \
        .csv(path)

    # Numeric (continuous / lagged) features.
    conCols = [
        "entreport_id", "item_id", "item_category_id",
        "item_cnt_month_lag_1", "item_cnt_month_lag_2", "item_cnt_month_lag_3", "item_cnt_month_lag_6", "item_cnt_month_lag_12",
        "date_item_avg_item_cnt_lag_1", "date_item_avg_item_cnt_lag_2", "date_item_avg_item_cnt_lag_3", "date_item_avg_item_cnt_lag_6", "date_item_avg_item_cnt_lag_12",
        "date_entreport_avg_item_cnt_lag_1", "date_entreport_avg_item_cnt_lag_2", "date_entreport_avg_item_cnt_lag_3", "date_entreport_avg_item_cnt_lag_6", "date_entreport_avg_item_cnt_lag_12",
        "date_cat_avg_item_cnt_lag_1", "date_cat_avg_item_cnt_lag_2", "date_cat_avg_item_cnt_lag_3", "date_cat_avg_item_cnt_lag_6", "date_cat_avg_item_cnt_lag_12",
        "date_cat_entreport_avg_item_cnt_lag_1", "date_cat_entreport_avg_item_cnt_lag_2", "date_cat_entreport_avg_item_cnt_lag_3", "date_cat_entreport_avg_item_cnt_lag_6", "date_cat_entreport_avg_item_cnt_lag_12",
        "date_type_avg_item_cnt_lag_1", "date_type_avg_item_cnt_lag_2", "date_type_avg_item_cnt_lag_3", "date_type_avg_item_cnt_lag_6", "date_type_avg_item_cnt_lag_12",
        "date_item_type_avg_item_cnt_lag_1", "date_item_type_avg_item_cnt_lag_2", "date_item_type_avg_item_cnt_lag_3", "date_item_type_avg_item_cnt_lag_6", "date_item_type_avg_item_cnt_lag_12",
        "date_city_avg_item_cnt_lag_1", "date_city_avg_item_cnt_lag_2", "date_city_avg_item_cnt_lag_3", "date_city_avg_item_cnt_lag_6", "date_city_avg_item_cnt_lag_12",
        "date_item_city_avg_item_cnt_lag_1", "date_item_city_avg_item_cnt_lag_2", "date_item_city_avg_item_cnt_lag_3", "date_item_city_avg_item_cnt_lag_6", "date_item_city_avg_item_cnt_lag_12",
        "month", "days"
    ]

    # Categorical features (string-valued in the CSV — presumably; verify
    # against the actual schema).
    cateCols = ["entreport_city", "entreport_name", "type", "subtype"]
    labelCol = "item_cnt_month"

    # Cast numeric feature columns to double and the label to int.
    for col in conCols:
        originalDataSimple = originalDataSimple.withColumn(col, originalDataSimple[col].cast("double"))
    originalDataSimple = originalDataSimple.withColumn(labelCol, originalDataSimple[labelCol].cast("int"))

    featureNames = cateCols + conCols

    # Train / validation split on the month index: months 0-32 train,
    # month 33 validates.
    trainData = originalDataSimple.filter(originalDataSimple["date_block_num"] < 33).toPandas()
    validData = originalDataSimple.filter(originalDataSimple["date_block_num"] == 33).toPandas()

    # NOTE(review): `lgb.LGBMRegressor` is a scikit-learn-style estimator and
    # CANNOT be used as a pyspark.ml Pipeline stage (that requires SynapseML's
    # Spark-native LightGBMRegressor). Train it directly on the pandas frames
    # instead; pandas "category" dtype lets LightGBM handle the string
    # categorical columns natively, so no VectorAssembler is needed.
    for c in cateCols:
        trainData[c] = trainData[c].astype("category")
        validData[c] = validData[c].astype("category")

    # LightGBM regressor; native-API parameter aliases are accepted as kwargs.
    regressor = lgb.LGBMRegressor(
        num_iterations=100,
        num_leaves=20,
        boost_from_average=False,
        feature_fraction=0.3,
        max_depth=-1,
        max_bin=255,
        learning_rate=0.01,
        min_sum_hessian_in_leaf=0.001,
        lambda_l1=0.0,
        lambda_l2=0.0,
        bagging_fraction=0.9,
        bagging_freq=1,
        bagging_seed=0,
        objective="regression",
    )

    # Early stopping requires a validation set: supply the month-33 split as
    # eval_set and stop after 50 rounds without RMSE improvement.
    regressor.fit(
        trainData[featureNames],
        trainData[labelCol],
        eval_set=[(validData[featureNames], validData[labelCol])],
        eval_metric="rmse",
        callbacks=[lgb.early_stopping(stopping_rounds=50)],
    )

    # Predict on the validation month and evaluate RMSE through Spark's
    # RegressionEvaluator, as in the original flow.
    validData["prediction"] = regressor.predict(validData[featureNames])
    evaluator = RegressionEvaluator(
        labelCol=labelCol,
        metricName="rmse",
        predictionCol="prediction"
    )
    rmse = evaluator.evaluate(spark.createDataFrame(validData[[labelCol, "prediction"]]))
    print(f"rmse: {rmse}")
    print("*****************************")

    # Save the trained booster. NOTE(review): Booster.save_model / the
    # Booster(model_file=...) loader only access the LOCAL filesystem — an
    # hdfs:// URI silently cannot work here. Save locally; copy to HDFS
    # separately if it must live there.
    modelFile = "regressionModel.txt"
    regressor.booster_.save_model(modelFile)

    # Reload the model to confirm the artifact round-trips.
    loaded_model = lgb.Booster(model_file=modelFile)
    # loaded_model.predict(validData[featureNames]) can be used for inference.

    # Stop the SparkSession.
    spark.stop()