"""
    Author: Rain
    Time: 2023/6/20 22:00
    File: pppModel.py
    IDE: PyCharm
"""
from pyspark import SparkContext
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import udf

from index_development.BaseModel.BaseModelAbstract import BaseModel





class PPPModel(BaseModel):
    """Payment-period ("ppp") tag model.

    Matches each user's raw ``ppp`` value against the five-level tag rules
    and produces a ``(userId, tagsId)`` DataFrame.
    """

    # 1. id of the four-level "payment period" tag this model serves
    def getTagId(self):
        """Return the four-level tag id for the payment-period tag."""
        return 13

    # 2. Combine the five-level tags with the business data to build newDF.
    #    NOTE: the original annotations were swapped (sc: SparkSession,
    #    spark: SparkContext); sc.broadcast() requires a SparkContext.
    def compute(self, esDF: DataFrame, fiveDF: DataFrame, sc: SparkContext, spark: SparkSession) -> DataFrame:
        """Tag each user with the five-level tag id matching its ppp value.

        :param esDF: business data, columns (ppp, user_id)
        :param fiveDF: five-level tag rules, columns (id, rule)
        :param sc: the SparkContext (used for broadcasting the rule dict)
        :param spark: the SparkSession
        :return: DataFrame with columns (userId, tagsId)
        """
        # esDF schema (from a sample run):
        """
        +---+-------+
        |ppp|user_id|
        +---+-------+
        | 30|  1-422|
        | 30|  1-422|
        | 10|  1-422|
        """
        # fiveDF schema (from a sample run):
        """
        +---+----+
        | id|rule|
        +---+----+
        | 48|  10|
        | 49|  15|
        | 50|  20|
        | 51|  30|
        +---+----+
        """
        print("===============将五级标签转化成字典===============")
        # e.g. {'10': 48, '15': 49, '20': 50, '30': 51}
        fiveRuleDict = fiveDF.rdd.map(lambda row: (row["rule"], row["id"])).collectAsMap()
        # Broadcast the small rule dict so every executor gets one copy.
        broadcastDict = sc.broadcast(fiveRuleDict)

        @udf
        def pppToTags(ppp: str) -> str:
            # .get avoids a job-killing KeyError on an unmapped ppp value;
            # cast to str because the default udf return type is StringType.
            tag = broadcastDict.value.get(str(ppp))
            return str(tag) if tag is not None else None

        newDF: DataFrame = esDF.select(esDF["user_id"].alias("userId"), pppToTags(esDF["ppp"]).alias("tagsId"))
        newDF.show()
        # Return the tagged DataFrame so the base-class pipeline can persist it
        # (the original built newDF but returned None).
        return newDF


if __name__ == '__main__':
    # Entry point: run the payment-period tagging pipeline end to end.
    model = PPPModel()
    model.execute()