#  Price-sensitivity mining tag development
#  PSM (Price Sensitivity Measurement) model
# # Three ratios drive price sensitivity:
        # discounted-order ratio + average-discount ratio + total-discount ratio
        # * discounted-order ratio --> (number of discounted orders / number of purchases)

        # * average-discount ratio --> (average discount amount / average receivable per order)
        # * average discount amount = total discount amount / number of discounted orders
        # * average receivable per order = total receivable amount / number of purchases


        # * total-discount ratio --> (total discount amount / total receivable amount)
        # Values to compute: discount count, purchase count, discount amount, order receivable amount

from pyspark.sql import functions as F
from base.BaseModel import BaseModel
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.clustering import KMeans, KMeansModel
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
class PriceSenmodel(BaseModel):
    """Price-sensitivity (PSM) tag model.

    Scores each member as the sum of three ratios (discounted-order ratio,
    average-discount ratio, total-discount ratio), clusters the scores with
    KMeans into five groups, and maps each cluster to a five-level tag id
    supplied by ``five_df``.
    """

    def compute(self, es_df, five_df):
        """Build the ``userId -> tagsId`` DataFrame.

        :param es_df: order DataFrame; reads columns ``memberid``, ``ordersn``,
            ``couponcodevalue`` (discount amount) and ``orderamount``
            (amount actually paid).
        :param five_df: five-level tag DataFrame with columns ``id`` and
            ``rule`` — assumed ordered so the first row is the most
            price-sensitive level (TODO confirm against the tag table).
        :return: DataFrame with columns ``userId`` and ``tagsId``.
        """
        es_df.show()
        five_df.show()

        # 1- Flag discounted orders and add the receivable amount per order:
        #    receivable = coupon value + amount actually paid.
        # NOTE(review): cast('int') truncates fractional coupons, so e.g. 0.5
        # is treated as "no discount" — confirm that is intended.
        statecolum = F.when(es_df['couponcodevalue'].cast('int') == 0.0, 0).when(
            es_df['couponcodevalue'].cast('int') != 0.0, 1).alias('state')
        orderamo = (es_df['couponcodevalue'] + es_df['orderamount']).alias('orderamo')
        esdf1 = es_df.select(statecolum, 'couponcodevalue', 'memberid', 'orderamount', orderamo, 'ordersn')
        esdf1.show()

        # 2- Per-member aggregates: discount count, purchase count,
        #    total discount amount, total receivable amount.
        discountcnt = F.sum(esdf1['state']).alias('discountcnt')
        purchasecnt = F.count(esdf1['state']).alias('purchasecnt')
        sumdiscount = F.sum(esdf1['couponcodevalue']).alias('sumdiscount')
        sumorderamount = F.sum(esdf1['orderamo']).alias('sumorderamount')
        esdf2 = esdf1.groupby(esdf1['memberid']).agg(discountcnt, purchasecnt, sumdiscount, sumorderamount)
        esdf2.show()

        # 3- The three ratios. Members who never used a coupon are dropped —
        #    they would only contribute zero/NULL ratios.
        esdf3 = esdf2.where(esdf2['discountcnt'] != 0)

        discountcnt_rate = (esdf3['discountcnt'] / esdf3['purchasecnt']).alias('discountcnt_rate')
        avgdiscount_rate = ((esdf3['sumdiscount'] / esdf3['discountcnt']) / (
                esdf3['sumorderamount'] / esdf3['purchasecnt'])).alias('avgdiscount_rate')
        sumdiscount_rate = (esdf3['sumdiscount'] / esdf3['sumorderamount']).alias('sumdiscount_rate')

        esdf4 = esdf3.select(esdf3['memberid'], discountcnt_rate, avgdiscount_rate, sumdiscount_rate)
        esdf4.show()

        # 4- PSM score: each ratio grows with price sensitivity, so sum them.
        psm = (esdf4['discountcnt_rate'] + esdf4['avgdiscount_rate'] + esdf4['sumdiscount_rate']).alias('psm')
        esdf5 = esdf4.select(esdf4['memberid'], psm)
        esdf5.show()

        # 5- Assemble the psm column into a feature vector for the estimator.
        vectdf = VectorAssembler().setInputCols(['psm']).setOutputCol('feature').transform(esdf5)
        vectdf.show()

        # 6- Cluster into 5 groups (one per tag level) and predict.
        kMeans = KMeans() \
            .setK(5) \
            .setSeed(10) \
            .setMaxIter(2) \
            .setFeaturesCol('feature') \
            .setPredictionCol('predictstr')
        model = kMeans.fit(vectdf)
        resultdf = model.transform(vectdf)
        resultdf.show()

        # 7/8- Cluster centers keyed by cluster index, e.g.
        #      [[0, 0.1411...], [1, 0.0967...], ...].
        centers = [[i, float(c[0])] for i, c in enumerate(model.clusterCenters())]
        centerdf = self.spark.createDataFrame(centers, ['predict', 'center'])
        centerdf.show()

        # 9- Sort clusters by center value, descending: the cluster with the
        #    highest psm center is the most price-sensitive one.
        centersortrdd = centerdf.rdd.sortBy(lambda x: x[1], ascending=False)
        print(centersortrdd.collect())

        # 10- Pair the sorted clusters with the five tag rows positionally.
        #     zip() requires both RDDs to have identical partitioning, hence
        #     repartition(1) on each side; with a single partition the sorted
        #     order is preserved. (The previous union-with-empty-RDD and
        #     identity map were no-ops and have been removed.)
        unionrdd = centersortrdd.repartition(1).zip(five_df.rdd.repartition(1))

        # 11- cluster index -> tag id, e.g. {4: 52, 2: 53, 3: 54, 0: 55, 1: 56}.
        fivedict = unionrdd.map(lambda row: (row[0][0], row[1][0])).collectAsMap()
        print(fivedict)

        # Map each member's cluster to its tag id. str() is required: the udf
        # is declared StringType, and returning a raw int would make Spark
        # silently emit NULLs instead of the tag id.
        to_tag = udf(lambda x: str(fivedict[x]), returnType=StringType())
        newdf = resultdf.select(resultdf['memberid'].alias('userId'),
                                to_tag(resultdf['predictstr']).alias('tagsId'))
        newdf.show()
        return newdf
if __name__ == '__main__':
    # Standalone entry point: build and run the PSM model for tag id 51.
    PriceSenmodel(51).execute()