from pyspark.sql import SparkSession
import os
import pyspark.sql.functions as F
from pyspark.sql.types import StringType




from cn.itcast.tag.base.BaseModel import BaseModel




"""
-------------------------------------------------
   Description :	TODO：年龄段标签重构代码
   SourceFile  :	AgeModel1
   Author      :	itcast team
-------------------------------------------------
"""




# 0. Configure the process environment so PySpark can locate the JDK,
# the Spark installation, and the Python interpreter used by both the
# driver and the executors.
os.environ.update({
    'JAVA_HOME': '/export/server/jdk1.8.0_241/',
    'SPARK_HOME': '/export/server/spark',
    'PYSPARK_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
    'PYSPARK_DRIVER_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
})




class AgeModel1(BaseModel):
    """Age-bracket tag model: assigns each user the five-level tag whose
    date range contains the user's birthday.
    """

    def compute(self, es_df, five_df):
        """Match users to age-bracket tags.

        :param es_df: user DataFrame with at least `id` and `birthday`
                      (birthday presumably formatted yyyy-MM-dd — confirm
                      against the ES index mapping).
        :param five_df: five-level tag DataFrame with `id` and `rule`,
                        where `rule` is a "start-end" date-range string.
        :return: DataFrame with string columns `userId` and `tagsId`.
        """
        # Normalize birthdays to a compact digit string (e.g. "19950507")
        # by stripping the "-" separators, so they compare against the
        # rule boundaries as plain strings.
        users = es_df.select(
            "id",
            F.regexp_replace("birthday", "-", "").alias("birthday"),
        )
        users.printSchema()  # debug output kept from the original
        users.show()

        # Split each rule "start-end" into its two range boundaries.
        rule_parts = F.split("rule", "-")
        brackets = five_df.select(
            "id",
            rule_parts[0].alias("start"),
            rule_parts[1].alias("end"),
        )
        brackets.printSchema()  # debug output kept from the original
        brackets.show()

        # Left-join users to brackets: a user gets a tag when the
        # birthday falls inside [start, end] (inclusive comparison).
        # Users matching no bracket keep a null tagsId via the left join.
        result = users.join(
            other=brackets,
            on=users['birthday'].between(brackets['start'], brackets['end']),
            how='left',
        ).select(
            users['id'].cast(StringType()).alias("userId"),
            brackets['id'].cast(StringType()).alias("tagsId"),
        )
        return result








if __name__ == '__main__':
    # Tag id 14 identifies the age-bracket tag in the tag system;
    # execute() is the BaseModel template method driving compute().
    model = AgeModel1(14)
    model.execute()