# @desc : price-sensitivity (PSM) mining tag model
__coding__ = "utf-8"
__author__ = "it1 team"

import os

from pyspark import SparkContext
from pyspark.ml.clustering import KMeans, KMeansModel
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql import functions as F
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType

from cn.tag.base.BaseModelES import BaseModelAbstract
# Server-side paths to the Spark distribution and the Python interpreter
# used by the executors; exported so spark-submit/pyspark pick them up.
SPARK_HOME = '/export/server/spark-2.4.8-bin-hadoop2.7'
PYSPARK_PYTHON = '/root/anaconda3/envs/pyspark_env/bin/python'
os.environ['SPARK_HOME'] = SPARK_HOME
os.environ["PYSPARK_PYTHON"] = PYSPARK_PYTHON

# Build (or reuse) the local SparkSession shared by the tag model below.
spark = (
    SparkSession.builder
    .appName("TfecProfile")
    .master("local[*]")
    .getOrCreate()
)
sc: SparkContext = spark.sparkContext

class PriceMo(BaseModelAbstract):
    """Price-sensitivity (PSM) tag model.

    Derives a per-member price-sensitivity score from order/coupon data,
    clusters members into 5 levels with KMeans, and maps each cluster to
    the matching five-level tag id.
    """

    def getTagId(self):
        # Id of the four-level "price sensitivity" tag in the tag system.
        return 51

    def compute(self, esdf: DataFrame, fivedf: DataFrame):
        """Compute the price-sensitivity tag for every member.

        :param esdf: order data loaded from ES; expects columns
            ``memberid``, ``ordersn``, ``orderamount`` and ``couponcodevalue``.
        :param fivedf: five-level tag rules; the tag ``id`` is the first
            column, one row per sensitivity level (presumably 5 rows to
            match ``K=5`` — TODO confirm against the tag metadata).
        :return: DataFrame with columns ``userId`` and ``tagsId``.
        """
        esdf.show()
        fivedf.show()

        # 1. Mark each order as discounted (state=1) or full-price (state=0)
        #    and compute the receivable total = coupon value + paid amount.
        coupon_int = esdf['couponcodevalue'].cast('int')
        statecolum = (F.when(coupon_int == 0, 0)
                      .when(coupon_int != 0, 1)
                      # fix: without otherwise(), a null coupon value produced a
                      # null state, silently dropping the order from both the
                      # discount sum and the purchase count below — treat such
                      # orders as full-price instead.
                      .otherwise(0)
                      .alias('state'))
        orderamo = (esdf['couponcodevalue'] + esdf['orderamount']).alias('orderamo')
        esdf1 = esdf.select(statecolum, 'couponcodevalue', 'memberid',
                            'orderamount', orderamo, 'ordersn')

        # 2. Per-member aggregates: discounted-order count, total order count,
        #    total discount amount, total receivable amount.
        esdf2 = esdf1.groupby(esdf1['memberid']).agg(
            F.sum(esdf1['state']).alias('discountcnt'),
            F.count(esdf1['state']).alias('purchasecnt'),
            F.sum(esdf1['couponcodevalue']).alias('sumdiscount'),
            F.sum(esdf1['orderamo']).alias('sumorderamount'),
        )

        # 3. Ratio features feeding the PSM score. Members who never used a
        #    coupon are filtered out (their ratios are 0 or undefined).
        esdf3 = esdf2.where(esdf2['discountcnt'] != 0)
        discountcnt_rate = (esdf3['discountcnt'] / esdf3['purchasecnt']).alias('discountcnt_rate')
        avgdiscount_rate = ((esdf3['sumdiscount'] / esdf3['discountcnt'])
                            / (esdf3['sumorderamount'] / esdf3['purchasecnt'])).alias('avgdiscount_rate')
        sumdiscount_rate = (esdf3['sumdiscount'] / esdf3['sumorderamount']).alias('sumdiscount_rate')
        esdf4: DataFrame = esdf3.select(esdf3['memberid'], discountcnt_rate,
                                        avgdiscount_rate, sumdiscount_rate)

        # 4. PSM score: all three ratios grow with price sensitivity, so
        #    their plain sum serves as the single score.
        psm = (esdf4['discountcnt_rate'] + esdf4['avgdiscount_rate']
               + esdf4['sumdiscount_rate']).alias('psm')
        esdf5 = esdf4.select(esdf4['memberid'], psm)
        esdf5.show()

        # 5. Assemble the psm column into a vector column for the estimator.
        vectdf = VectorAssembler().setInputCols(['psm']).setOutputCol('feature').transform(esdf5)

        # 6. Cluster members into 5 sensitivity levels (fixed seed for
        #    reproducible cluster assignments).
        kMeans: KMeans = (KMeans()
                          .setK(5)
                          .setSeed(10)
                          .setMaxIter(20)
                          .setFeaturesCol('feature')
                          .setPredictionCol('predictstr'))
        model: KMeansModel = kMeans.fit(vectdf)
        resultdf: DataFrame = model.transform(vectdf)

        # 7./8. Cluster centers keyed by their prediction index; centers are
        #    1-d because the feature vector holds only psm.
        centers = {i: float(c[0]) for i, c in enumerate(model.clusterCenters())}

        # 9. Sort clusters by center value descending: the cluster with the
        #    highest center is the most price-sensitive level.
        centerdf: DataFrame = spark.createDataFrame(
            [[k, v] for k, v in centers.items()], ['predict', 'center'])
        centersortrdd = centerdf.rdd.sortBy(lambda row: row[1], ascending=False)

        # 10. Pair the sorted clusters with the five-level tag rows
        #     positionally; zip() requires both RDDs to have the same
        #     partition layout, hence repartition(1) on both sides.
        #     (Removed the original union with an empty RDD and the identity
        #     map() — both were no-ops.)
        unionrdd = centersortrdd.repartition(1).zip(fivedf.rdd.repartition(1))

        # 11. prediction index -> five-level tag id.
        fivedict = unionrdd.map(lambda pair: (pair[0][0], pair[1][0])).collectAsMap()

        # 12. Attach the tag id to each member via its cluster prediction.
        newdf: DataFrame = resultdf.select(
            resultdf['memberid'].alias('userId'),
            udf(lambda p: fivedict[p], returnType=StringType())(
                resultdf['predictstr']).alias('tagsId'))
        newdf.show()

        return newdf



if __name__ == '__main__':
    # Run the full tag pipeline defined by the base class template method.
    PriceMo().execute()
