"""
    重构收入标签代码
"""
# 导入模块
from pyspark import SparkContext
from pyspark.sql import DataFrame, SparkSession
from index_development.BaseModel.BaseModelAbstract import BaseModel
import pyspark.sql.functions as F


# 定义收入类
# Income tag model: assigns each user the five-level income tag whose
# [min_income, max_income] range contains the user's income.
class incomeModel(BaseModel):
    # Override: id of the four-level tag this model computes (9 = income).
    def getTagId(self):
        return 9

    # Override: compute the userId -> tagsId mapping.
    #
    # esDF:   user data from ES; must expose 'user_id' and 'income' columns.
    # fiveDF: five-level tag metadata with 'id' and a 'rule' column of the
    #         form "min-max" (e.g. "0-30000").
    # NOTE(review): the original annotations were swapped (sc: SparkSession,
    # spark: SparkContext); fixed to match the conventional parameter names —
    # confirm against BaseModel.execute's call order.
    # Returns a DataFrame with columns (userId, tagsId).
    def compute(self, esDF: DataFrame, fiveDF: DataFrame, sc: SparkContext, spark: SparkSession):
        # Step 1: split the five-level rule "min-max" into its two bounds.
        # Compute the split once and reuse it; cast both bounds to long so
        # the between() below compares numerically rather than
        # lexicographically (as strings, "9" > "10").
        print('============== 5级标签的rule 拆成 min_income,max_income ==============')
        rule_parts = F.split(fiveDF['rule'], '-')
        fiveDF2 = fiveDF.select(
            fiveDF['id'],
            rule_parts[0].cast('long').alias('min_income'),
            rule_parts[1].cast('long').alias('max_income'))
        fiveDF2.show()

        # Step 2: tag each user whose income falls inside a rule's range.
        # Join directly on the between() condition instead of an
        # unconditioned (cross) join followed by where(): same result, but
        # it does not require spark.sql.crossJoin.enabled and lets Spark
        # plan the join properly.
        print('============== 判断 es数据中 income 在 ( min_income,max_income) 中打上对应的标签 ==============')
        newDF: DataFrame = esDF \
            .join(fiveDF2, esDF['income'].between(fiveDF2['min_income'], fiveDF2['max_income'])) \
            .select(esDF['user_id'].alias('userId'), fiveDF2['id'].alias('tagsId'))
        newDF.show()
        newDF.printSchema()

        # Hand the tagged rows back to the base pipeline for persistence.
        return newDF


# Script entry point: instantiate the income model and run the full
# tag pipeline defined by the BaseModel template method.
if __name__ == '__main__':
    model = incomeModel()
    model.execute()
