import os
from pyspark import SparkContext
from pyspark.ml.clustering import KMeans, KMeansModel
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql import functions as F
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType

# Price-sensitivity (PSM) user-tag model.
# 2 - paths on the deployment server
from tag.base.BaseModel import BaseModel

# Server-side locations of the Spark installation and the Python interpreter
# that PySpark workers should use.
SPARK_HOME = '/export/server/spark'
PYSPARK_PYTHON = '/root/anaconda3/envs/pyspark_env/bin/python'

# Export both so the Spark launcher and executors pick them up.
os.environ.update({
    'SPARK_HOME': SPARK_HOME,
    'PYSPARK_PYTHON': PYSPARK_PYTHON,
})

class PriceSenmodel(BaseModel):
    # Price-sensitivity (PSM) tag model: scores each member by how strongly
    # their purchases use discounts, clusters the scores with KMeans, and maps
    # each cluster (ordered by cluster center, descending) to a five-level tag.
    def compute(self, es_df: DataFrame, five_df: DataFrame) -> DataFrame:
        # Business (order) data sample:
        """
        Compute the price-sensitivity tag for every member.

        Sample of ``es_df`` (order data loaded from ES):

        +---------------+--------+-----------+--------------------+
        |couponcodevalue|memberid|orderamount|             ordersn|
        +---------------+--------+-----------+--------------------+
        |            0.0|     200|     1196.0|suning_7879973204...|
        |            0.0|      26|     1439.0|amazon_7879653293...|
        |            0.0|      60|     1199.0|jd_14090314513387629|
        |            0.0|     412|     3460.0|gome_794761718752549|
        |            0.0|      29|     3298.0|suning_7872279096...|
        |            0.0|      13|     1039.0|amazon_7959052158...|
        |            0.0|     905|     1649.0|suning_7959975236...|
        |            0.0|     615|     1999.0|amazon_7879688460...|
        |            0.0|     614|      299.0|jd_14090314523784341|
        |            0.0|      82|      599.0|gome_795166749151282|
        |            0.0|     576|     1388.0|gome_795174100094339|
        |            0.0|     867|     1699.0|gome_787236142296762|
        |            0.0|     912|     2688.0|gome_795932415351890|
        |          200.0|      82|     2399.0|amazon_7951729027...|

        :param es_df: order rows with couponcodevalue, memberid, orderamount, ordersn
        :param five_df: five-level tag rules (id, rule), one row per tag level
        :return: DataFrame with columns (userId, tagsId) — one tag id per member
        """
        es_df.show()
        # Five-level tag rule data
        five_df.show()

        # Three ratios drive price sensitivity:
        # coupon-order ratio + average-discount ratio + total-discount ratio
        # * coupon-order ratio     --> (couponed-order count / purchase count)
        # * average-discount ratio --> (mean discount per couponed order / mean receivable per order)
        # * mean discount per couponed order = total discount / couponed-order count
        # * mean receivable per order = total receivable / purchase count
        # * total-discount ratio   --> (total discount / total receivable)
        # Values to compute: couponed-order count, purchase count, total discount, total receivable

        # Process the business data
        # TODO 1 - from es_df, flag coupon usage per order ('state') and add the
        # gross receivable column orderamo = couponcodevalue + orderamount.
        # NOTE(review): cast('int') truncates — a coupon value in (0, 1) would be
        # flagged 0; confirm coupon values are always whole amounts.
        statecolum = F.when(es_df['couponcodevalue'].cast('int') == 0.0, 0).when(
            es_df['couponcodevalue'].cast('int') != 0.0, 1).alias('state')
        orderamo = (es_df['couponcodevalue'] + es_df['orderamount']).alias('orderamo')
        es_df1 = es_df.select(statecolum, 'couponcodevalue', 'memberid', 'orderamount', orderamo, 'ordersn')
        es_df1.show()

        # TODO 2 - per member, aggregate: couponed-order count (discountcnt),
        # purchase count (purchasecnt), total discount (sumdiscount),
        # total receivable (sumorderamount).
        discountcnt=F.sum(es_df1['state']).alias('discountcnt')
        purchasecnt=F.count(es_df1['state']).alias('purchasecnt')
        sumdiscount=F.sum(es_df1['couponcodevalue']).alias('sumdiscount')
        sumorderamount=F.sum(es_df1['orderamo']).alias('sumorderamount')
        esdf2=es_df1.groupby(es_df1['memberid']).agg(discountcnt,purchasecnt,sumdiscount,sumorderamount)
        esdf2.show()

        # TODO 3 - compute the three ratios: coupon-order ratio,
        # average-discount ratio, total-discount ratio.
        # Filter out members who never used a coupon (avoids division by zero
        # in avgdiscount_rate below).
        esdf3 = esdf2.select(esdf2['memberid'], esdf2['discountcnt'], esdf2['purchasecnt'], esdf2['sumdiscount'],
                             esdf2['sumorderamount']).where(esdf2['discountcnt'] != 0)

        discountcnt_rate = (esdf3['discountcnt'] / esdf3['purchasecnt']).alias('discountcnt_rate')
        avgdiscount_rate = ((esdf3['sumdiscount'] / esdf3['discountcnt']) / (
                    esdf3['sumorderamount'] / esdf3['purchasecnt'])).alias('avgdiscount_rate')
        sumdiscount_rate = (esdf3['sumdiscount'] / esdf3['sumorderamount']).alias('sumdiscount_rate')

        esdf4: DataFrame = esdf3.select(esdf3['memberid'], discountcnt_rate, avgdiscount_rate, sumdiscount_rate)
        esdf4.show()

        # TODO 4 - derive the psm column from the three ratios.
        # Each ratio grows with price sensitivity, so their sum is the score.
        psm = (esdf4['discountcnt_rate'] + esdf4['avgdiscount_rate'] + esdf4['sumdiscount_rate']).alias('psm')
        esdf5 = esdf4.select(esdf4['memberid'], psm)
        esdf5.show()

        # TODO 5 - clustering: wrap psm into a feature vector column so it can
        # be fed to the KMeans estimator.
        vectdf = VectorAssembler().setInputCols(['psm']).setOutputCol('feature').transform(esdf5)
        # vectdf.show()
        # | memberid | psm | feature |
        # +--------+-------------------+--------------------+
        # | 13823489 | 0.10447058073727304 | [0.10447058073727... |
        # | 26 | 0.10715859273028262 | [0.10715859273028... |
        # | 4035177 | 0.0888618199782621 | [0.0888618199782621] |
        # | 29 | 0.1384530168585812 | [0.1384530168585812] |
        # | 4033555 | 0.3537033928754625 | [0.3537033928754625] |

        # TODO 6 - train KMeans (k=5, fixed seed for reproducibility) on the
        # feature column; predictions land in 'predictstr'.
        kMeans: KMeans = KMeans() \
            .setK(5) \
            .setSeed(10) \
            .setMaxIter(2) \
            .setFeaturesCol('feature') \
            .setPredictionCol('predictstr')
        model: KMeansModel = kMeans.fit(vectdf)

        # Predict a cluster for every member
        resultdf = model.transform(vectdf)
        resultdf.show()

        # TODO 7 - get the cluster centers, index them, sort by center value,
        # then join the 'predictstr' labels with five_df.
        center = model.clusterCenters()
        # [0.14113081,0.09675275,0.36030053,0.24467312,0.60442997]
        # [0,1,2,3,4]  cluster-center order is NOT guaranteed to match the
        # desired tag order, hence the explicit sort below.

        # TODO 8 - build {cluster index: center value}
        dict1 = {}
        for i in range(len(center)):
            dict1[i] = float(center[i][0])
        # {0: 0.141130811056604, 1: 0.09675274913886216, 2: 0.3603005265949139, 3: 0.24467312420217655,
        #  4: 0.6044299731603698}
        # TODO 9 - dict -> list -> DataFrame -> RDD sorted by center (desc) -> zip
        list2 = [[k, v] for (k, v) in dict1.items()]
        #[[0, 0.141130811056604], [1, 0.09675274913886216], [2, 0.3603005265949139], [3, 0.24467312420217655], [4, 0.6044299731603698]]
        centerdf = self.spark.createDataFrame(list2, ['predict', 'center'])
        # +-------+-------------------+
        # | predict | center |
        # +-------+-------------------+
        # | 0 | 0.141130811056604 |
        # | 1 | 0.09675274913886216 |
        # | 2 | 0.3603005265949139 |
        # | 3 | 0.24467312420217655 |
        # | 4 | 0.6044299731603698 |
        centersortrdd = centerdf.rdd.sortBy(lambda x: x[1], ascending=False)
        # [Row(predict=4, center=0.6044299731603698), Row(predict=2, center=0.3603005265949139),
        #  Row(predict=3, center=0.24467312420217655), Row(predict=0, center=0.141130811056604),
        #  Row(predict=1, center=0.09675274913886216)]

        # TODO 10 - pair with five_df row-by-row: force both RDDs to a single
        # partition, then zip (largest center pairs with the first tag rule).
        # NOTE(review): zip alignment here assumes repartition(1) preserves the
        # sortBy order and that five_df's rows come back in rule order —
        # repartition shuffles, so this ordering is not guaranteed by Spark;
        # verify on the target cluster.
        unionrdd = centersortrdd.repartition(1).map(lambda x: x).zip(five_df.rdd.repartition(1))
        # # (Row(predict=4, center=0.6044299731603698), Row(id=52, rule='1'))
        #         # (Row(predict=2, center=0.3603005265949139), Row(id=53, rule='2'))
        #         # (Row(predict=3, center=0.24467312420217655), Row(id=54, rule='3'))
        #         # (Row(predict=0, center=0.141130811056604), Row(id=55, rule='4'))
        #         # (Row(predict=1, center=0.09675274913886216), Row(id=56, rule='5'))
        fivedict = unionrdd.map(lambda row: (row[0][0], row[1][0])).collectAsMap()
        # print(fivedict)
        # {4: 52, 2: 53, 3: 54, 0: 55, 1: 56}

        # Map each member's cluster prediction to its tag id via the dict.
        newdf: DataFrame = resultdf.select(resultdf['memberid'].alias('userId'),
                                           udf(lambda x: fivedict[x], returnType=StringType())(
                                               resultdf['predictstr']).alias('tagsId'))
        newdf.show()

        return newdf




if __name__ == '__main__':
    # Build the PSM model for tag id 51 and run the standard tag pipeline.
    PriceSenmodel(51).execute()

