from ctypes import Array

from pyspark.ml.evaluation import ClusteringEvaluator
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.clustering import KMeans
from pyspark.sql.functions import *

from bigdata.Personas import Hive_process
from pyspark.sql.types import *
if __name__ == '__main__':
    # User consumption ability: score each member on recency / frequency /
    # monetary value, then cluster the scores with KMeans.
    hive = Hive_process.Hive_process()
    table_name = 'shopping.tbl_goods'
    goods_df = hive.read(table_name)
    orders_df = hive.read('shopping.tbl_orders')
    # Join orders to their goods rows, matched on the order serial number.
    goods_orders_df = goods_df.join(orders_df,orders_df['ordersn'] == goods_df['cordersn'])

    # Zeppelin notebook code (kept for reference) that computed total spend,
    # purchase frequency and last purchase time:
    # hcx = HiveContext(sparkContext=spark.sparkContext)
    # orders_df = hcx.table('shopping.tbl_orders')
    # goods_df = hcx.table('shopping.tbl_goods')
    # goods_orders_df = goods_df.join(orders_df, orders_df['ordersn'] == goods_df['cordersn'])
    #
    # consumption_df =goods_orders_df.groupby('memberId')\
    #     .agg(
    #         sum('productAmount').alias('total_consumption'),
    #         max('finishtime').alias('last_time'),
    #         count('price').alias('frequency')
    #     )
    # Score the last purchase time (stored as a unix timestamp).
    def time_score(data):
        """Score a last-purchase unix timestamp from 1 (oldest) to 5 (most recent)."""
        # Descending unix-timestamp cut points paired with their scores;
        # the first threshold the value reaches wins.
        cutoffs = ((1566048786, 5),
                   (1566040352, 4),
                   (1566034878, 3),
                   (1566012799, 2))
        for threshold, score in cutoffs:
            if data >= threshold:
                return score
        return 1

    # Frequency score mapping: >=145:5; >=138:4; >=129:3; >=121:2; otherwise 1
    def frequency_score(data):
        """Score a purchase count from 1 (rare) to 5 (frequent)."""
        # Walk the cut points upward; each threshold reached raises the score.
        score = 1
        if data >= 121:
            score = 2
        if data >= 129:
            score = 3
        if data >= 138:
            score = 4
        if data >= 145:
            score = 5
        return score

    # Consumption amount score mapping: >=290000:5; >=250000:4; >=230000:3; >=200000:2; otherwise 1
    # (NOTE: an earlier comment said 29000/25000/... but the code has always used 290000/250000/...)
    def consumption_score(data):
        """Score a total consumption amount from 1 (low spend) to 5 (high spend)."""
        # Guard-clause ladder over ascending spend bands.
        if data < 200000:
            return 1
        if data < 230000:
            return 2
        if data < 250000:
            return 3
        if data < 290000:
            return 4
        return 5

    # Per-member RFM aggregation over the joined goods/orders rows:
    # monetary (sum of productAmount), recency (max finishtime) and
    # frequency (count of non-null price rows). `sum`/`max`/`count` here
    # are the pyspark.sql.functions versions pulled in by the star import.
    consumption_df = goods_orders_df.groupby('memberId')\
        .agg(
            sum('productAmount').alias('total_consumption'),
            max('finishtime').alias('last_time'),
            count('price').alias('frequency')
        )

    # Wrap the scoring helpers as Spark UDFs so each raw metric can be
    # mapped onto its 1-5 score column.
    frequency_udf = udf(frequency_score, IntegerType())
    time_udf = udf(time_score, IntegerType())
    consumption_udf = udf(consumption_score, IntegerType())

    # finishtime is stored as a string; cast to int so time_score can
    # compare it against unix-timestamp thresholds.
    consumption_df = consumption_df.withColumn('last_time', consumption_df['last_time'].cast('int'))
    consumption_df = consumption_df.withColumn('total_consumption', consumption_udf(consumption_df['total_consumption']))\
        .withColumn('frequency', frequency_udf(consumption_df['frequency']))\
        .withColumn('last_time', time_udf(consumption_df['last_time']))

    consumption_df = consumption_df.select('memberId', 'total_consumption', 'last_time', 'frequency')

    # Assemble the three score columns into one feature vector for KMeans.
    vectorDF = VectorAssembler()\
        .setInputCols(['total_consumption', 'last_time', 'frequency'])\
        .setOutputCol('feature')\
        .transform(consumption_df)

    # Cluster members into 7 consumption-ability segments. Fixed seed for
    # reproducibility; maxIter=2 keeps the demo run cheap.
    # (Removed dead `silhouette_score = []` — leftover from an unfinished
    # silhouette-based K sweep; it was never read or written again.)
    kmeans = KMeans()\
        .setK(7)\
        .setSeed(100)\
        .setMaxIter(2)\
        .setFeaturesCol('feature')\
        .setPredictionCol('predict')
    model = kmeans.fit(vectorDF)
    predicted = model.transform(vectorDF)
    predicted.show(10)

