'''
@Project --> File: InsuranceUserprofile -> BaseModelPeriod
@IDE: PyCharm
@Author: Burke
@Date: 2023/6/20
'''

from pyspark.sql import SparkSession, DataFrame,functions as F

import os

# 明确指定pyspark使用的python环境
from pyspark.sql.functions import udf

from index_development.BaseModel.BaseModelAbstract import BaseModel

# Pin the Spark home and the Python interpreter used by both the driver and
# the executors, so PySpark does not fall back to the system default Python.
os.environ['SPARK_HOME'] = '/export/server/spark'
os.environ['PYSPARK_PYTHON'] = '/root/anaconda3/bin/python3'
os.environ['PYSPARK_DRIVER_PYTHON'] = '/root/anaconda3/bin/python3'

# Module-level SparkSession running in local mode.
# NOTE(review): enableHiveSupport suggests source tables live in Hive —
# confirm against the BaseModel read path.
spark = SparkSession \
    .builder \
    .appName('BaseModelAbstract') \
    .master('local[*]') \
    .enableHiveSupport()\
    .getOrCreate()

# SparkContext handle (not used in this chunk; presumably kept for RDD-level
# operations elsewhere — verify before removing).
sc = spark.sparkContext

class ConsumptionCycle(BaseModel):
    """Consumption-cycle tag model.

    Computes, for each user, the number of days between a reference date and
    the purchase datetime, matches that value against the five-level rule
    ranges (``"start-end"``), and returns one row per user with the matched
    tag ids joined by ``','``.
    """

    def getTagId(self):
        # Consumption-cycle parent tag id is 12.
        return 12

    def compute(self, esDF: DataFrame, fiveDF: DataFrame):
        """Compute consumption-cycle tags for every user.

        :param esDF: business data; must expose ``user_id`` and ``buy_datetime``.
        :param fiveDF: rule rows; ``id`` is the tag id and ``rule`` is a
            '-'-separated "start-end" day range.
        :return: DataFrame with columns ``userId`` and ``tagsId`` (tag ids
            for the same user concatenated with ',').
        """
        print('================= 开始处理fiveDF ====================')
        # Split each rule "start-end" into its lower/upper bound for the range join.
        fiveDF : DataFrame = fiveDF.select(fiveDF.id.alias('tagsId')
                                           ,F.split(fiveDF.rule,'-')[0].alias('start')
                                           ,F.split(fiveDF.rule,'-')[1].alias('end')
                                                    )
        fiveDF.show()

        print('================= 开始处理EsDF ====================')
        # Purchase cycle: day difference between the reference date and the
        # policy purchase date.
        # NOTE(review): the reference date is current_date() minus 365 days —
        # presumably to replay year-old data; confirm against the data set.
        esDF = esDF.select(esDF.user_id.alias('userId'),F.datediff(F.date_sub(F.current_date(),365)
                                                    ,F.date_format(esDF.buy_datetime,'yyyy-MM-dd')
                                                   ).alias('days')).orderBy(esDF.user_id)
        esDF.printSchema()
        esDF.show(truncate=False)

        # Range join: a user matches a rule when `days` falls in [start, end];
        # unmatched rows (null tagsId) are dropped afterwards.
        result : DataFrame = esDF.join(fiveDF,on=esDF.days.between(fiveDF.start,fiveDF.end),how='left')\
                                    .select(esDF.userId.cast('int'),fiveDF.tagsId.cast('int')) \
                                    .where('tagsId is not null')\
                                    .orderBy(esDF.userId)

        # createOrReplaceTempView so re-running compute() in the same Spark
        # session does not fail with "view t1 already exists".
        result.createOrReplaceTempView('t1')

        # BUG FIX: the original SQL used cast(collect_set(tagsId),'string'),
        # which is invalid Spark SQL (CAST takes `expr AS type`, not a comma)
        # and left the column unnamed. Concatenate the collected tag ids with
        # ',' and alias the column instead.
        sql = """
            select userId, concat_ws(',', collect_set(tagsId)) as tagsId from t1 group by userId
        """
        result2 = spark.sql(sql)

        return result2

if __name__ == '__main__':
    # Entry point: build the consumption-cycle model and run the full
    # BaseModel pipeline (read -> compute -> write).
    model = ConsumptionCycle()
    model.execute()
