from pyspark.sql import SparkSession
import os
import pyspark.sql.functions as F

from cn.itcast.tag.base.BaseModel import BaseModel
from cn.itcast.tag.bean.ESMeta import ruleToESMeta

"""
-------------------------------------------------
   Description :	Average order value ("customer unit price") tag model
   SourceFile  :	PriceModel
   Author      :	itcast team
-------------------------------------------------
"""

# 0. Set system environment variables so PySpark can locate the JVM, the
# Spark installation, and the Python interpreter for both driver and workers.
os.environ['JAVA_HOME'] = '/export/server/jdk1.8.0_241/'
os.environ['SPARK_HOME'] = '/export/server/spark'
os.environ['PYSPARK_PYTHON'] = '/root/anaconda3/envs/pyspark_env/bin/python3'
os.environ['PYSPARK_DRIVER_PYTHON'] = '/root/anaconda3/envs/pyspark_env/bin/python3'

# TODO: average order value ("customer unit price") tag model
class PriceModel(BaseModel):
    def compute(self, es_df, five_df):
        """
        Match each user's order amount against the five-level tag rules
        and assign the corresponding tag id.

        Args:
            es_df: business data pulled from ES; read here for its
                'id' (user id) and 'orderamount' columns
            five_df: five-level tag rule data with 'id' (tag id) and
                'rule' columns

        Returns:
            DataFrame: two columns, 'userId' and 'tagsId'. Because the
            join is a left join, users whose 'orderamount' matches no
            rule keep a null 'tagsId'.
        """
        # Debug output: dump schema and sample rows of the business data
        # to confirm its structure before joining.
        es_df.printSchema()
        es_df.show()

        # Same debug output for the five-level rule data.
        five_df.printSchema()
        five_df.show()

        # Left-join the business data to the rules on exact equality of
        # 'orderamount' and 'rule', then keep only the user id (as
        # 'userId') and the matched tag id (as 'tagsId').
        join_cond = es_df['orderamount'] == five_df['rule']
        result_df = (
            es_df.join(five_df, on=join_cond, how='left')
                 .select(es_df['id'].alias("userId"),
                         five_df['id'].alias("tagsId"))
        )
        return result_df


if __name__ == '__main__':
    # Fixed copy-paste naming: the instance is a PriceModel, not an age model.
    # 77 is the tag id handed to BaseModel — presumably the four-level
    # "price" tag id; confirm against BaseModel/tag metadata.
    price_model = PriceModel(77)
    price_model.execute()