from pyspark.sql import SparkSession
import os
import pyspark.sql.functions as F
from pyspark.sql.types import StringType

from cn.itcast.tag.base.BaseModel import BaseModel

"""
-------------------------------------------------
   Description :	TODO：消费水平段标签重构代码
   SourceFile  :	ConsumptionLevel
   Author      :	itcast team
-------------------------------------------------
"""

# 0. Set the system environment variables required by Spark / PySpark.
os.environ.update({
    'JAVA_HOME': '/export/server/jdk1.8.0_241/',
    'SPARK_HOME': '/export/server/spark',
    'PYSPARK_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
    'PYSPARK_DRIVER_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
})
class ConsumptionLevel(BaseModel):
    """Consumption-level tag model.

    Matches each member's total paid amount against the five-level tag
    rules, where each rule is a numeric range encoded as "start-end".
    """

    def compute(self, es_df, five_df):
        """Compute consumption-level tags for every member.

        :param es_df: business DataFrame with columns ``memberid`` and
            ``paidamount`` (per-order amounts; may contain "-" signs).
        :param five_df: tag-rule DataFrame with columns ``id`` and ``rule``,
            where ``rule`` is a range string like "0-999".
        :return: DataFrame with columns ``userId`` and ``tagsId``.
        """
        # Aggregate: total paid amount per member.
        es_df = es_df.groupby("memberid").agg(F.sum("paidamount").alias("paidamount"))
        # Business data processing: strip the minus sign (negative amounts are
        # counted as positive spend — presumably refunds; TODO confirm with the
        # business rule) and cast to double.
        # BUG FIX: the original left paidamount as a string, so between() below
        # compared lexicographically ("9" > "10", "150" < "50") and assigned
        # wrong tags; casting both sides to double makes the range match numeric.
        es_df = es_df.select("memberid",
                             F.regexp_replace("paidamount", "-", "").cast("double").alias("paidamount"))

        es_df.printSchema()
        es_df.show()
        # Tag-rule data processing: split "start-end" into numeric bounds.
        five_df = five_df.select("id",
                                 F.split("rule", "-")[0].cast("double").alias("start"),
                                 F.split("rule", "-")[1].cast("double").alias("end"))
        five_df.printSchema()
        five_df.show()
        # Tagging: left join keeps every member even if no rule range matches
        # (such rows get a null tagsId).
        new_df = es_df.join(five_df, es_df['paidamount'].between(five_df['start'], five_df['end']), 'left') \
            .select(es_df['memberid'].alias("userId"), five_df['id'].alias("tagsId"))
        new_df.show()
        return new_df


if __name__ == '__main__':
    # 83 is the tag id of the consumption-level tag in the tag system.
    model = ConsumptionLevel(83)
    model.execute()



