from pyspark.sql import functions as F
from pyspark.sql.functions import *

from bigdata.labels import Hive_process

if __name__ == '__main__':
    # Compute each member's average consumption cycle: the mean number of
    # days between consecutive purchases, derived from the orders table.
    hive = Hive_process.Hive_process()
    table_name = 'shopping.tbl_orders'
    orders_df = hive.read(table_name)

    # Per member: first purchase time, last purchase time, and purchase count.
    # NOTE(review): assumes 'finishtime' is a unix timestamp (seconds) — the
    # from_unixtime conversion below depends on that.
    time_df = orders_df.groupby('memberId').agg(
        F.min('finishtime').alias('start_time'),
        F.max('finishtime').alias('end_time'),
        F.count('finishtime').alias('count'),
    )

    # Convert the unix timestamps to dates and measure the first-to-last
    # purchase span in whole days.
    consumption_time = time_df.select(
        'memberId',
        'count',
        F.datediff(
            F.from_unixtime('end_time'), F.from_unixtime('start_time')
        ).alias('time_consumption'),
    )

    # N purchases bound N - 1 intervals, so the average cycle is
    # span / (count - 1), not span / count. Members with a single purchase
    # divide by zero and yield NULL, which is more honest than a cycle of 0.
    consumption_cycle = consumption_time.select(
        'memberId',
        (F.col('time_consumption') / (F.col('count') - 1)).alias('consumption_cycle'),
    )
    consumption_cycle.show()
