from pyspark.sql import SparkSession
import os
import pyspark.sql.functions as F

from cn.itcast.tag.base.BaseModel import BaseModel
from cn.itcast.tag.bean.ESMeta import ruleToESMeta

"""
-------------------------------------------------
   Description :	TODO：
   SourceFile  :	PoliticalFaceModel
   Author      :	itcast team
-------------------------------------------------
"""

# 0. Configure the runtime environment before any Spark session is created:
# point the JVM, the Spark installation, and both the driver and executor
# Python interpreters at the cluster-local paths.
os.environ.update({
    'JAVA_HOME': '/export/server/jdk1.8.0_241/',
    'SPARK_HOME': '/export/server/spark',
    'PYSPARK_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python3',
    'PYSPARK_DRIVER_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python3',
})

# TODO: political-face (政治面貌) tag model
class PoliticalFaceModel(BaseModel):
    """Tag model that assigns a political-face tag id to each user."""

    def compute(self, es_df, five_df):
        """
        Compute the political-face tag for every user.

        Args:
            es_df: business data read from ES; expected to carry the user's
                ``id`` and ``politicalface`` columns.
            five_df: five-level tag rules read from MySQL; expected to carry
                ``id`` (the tag id) and ``rule`` columns.

        Returns:
            DataFrame: two columns, ``userId`` and the matched ``tagsId``.
        """
        # Debug output: dump schema and sample rows of both inputs so the
        # data structures can be verified during development.
        for debug_df in (es_df, five_df):
            debug_df.printSchema()
            debug_df.show()

        # Left-join business data against the rule table on
        # politicalface == rule, so users with no matching rule are kept
        # (their tagsId comes out null). Rename es_df.id -> userId and
        # five_df.id -> tagsId in the projection.
        match_cond = es_df['politicalface'] == five_df['rule']
        result_df = (
            es_df.join(five_df, match_cond, 'left')
                 .select(
                     es_df['id'].alias("userId"),
                     five_df['id'].alias("tagsId"),
                 )
        )

        # Debug output: dump the computed tag DataFrame before returning.
        result_df.printSchema()
        result_df.show()

        return result_df



if __name__ == '__main__':
    # 62 is presumably the four-level tag id handed to BaseModel — confirm
    # against the tag table. execute() drives the read/compute/write flow.
    PoliticalFaceModel(62).execute()
