import os
from pyspark import SparkContext
from pyspark.ml.clustering import KMeans, KMeansModel
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql import functions as F
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType


# Price-sensitivity (PSM) tag model
# 2 - server-side paths
from cn.itcast.tag.base.BaseModel import BaseModel


SPARK_HOME = '/export/server/spark'
PYSPARK_PYTHON = '/root/anaconda3/envs/pyspark_env/bin/python3'
# Export the paths so pyspark launches against the right Spark installation
# and executor-side Python interpreter.
os.environ['SPARK_HOME'] = SPARK_HOME
os.environ["PYSPARK_PYTHON"] = PYSPARK_PYTHON


class PriceSenmodel(BaseModel):
    """Price-sensitivity (PSM) tag model.

    From the order data it derives, per member, how often and how heavily
    discounts were used, combines those ratios into a single PSM score,
    clusters members into 5 groups with KMeans, and maps each cluster to a
    five-level tag id supplied in ``five_df``.
    """

    def compute(self, es_df, five_df):
        # es_df: business/order data; uses columns couponcodevalue,
        #        orderamount, memberid, ordersn.
        # five_df: five-level tag rows; presumably (tagId, rule) sorted so
        #          row order lines up with the sorted cluster centers below
        #          — TODO confirm against BaseModel.
        # es_df.show()
        # five_df.show()


        # Compute discount-order ratio, average-discount ratio and
        # total-discount ratio:
        #  * discount-order ratio   = discount order count / purchase count
        #  * average-discount ratio = (avg discount per order)
        #                             / (avg receivable per order)
        #      avg discount per order   = total discount / discount count
        #      avg receivable per order = total receivable / purchase count
        #  * total-discount ratio   = total discount / total receivable
        # Aggregates needed: discount count, purchase count, discount
        # amount, order receivable amount.

        # Discount flag per order: 1 when a coupon was used, 0 otherwise.
        # NOTE(review): with no .otherwise(), a null couponcodevalue yields
        # a null state, so such rows are silently excluded from both the
        # sum() and count() aggregates below — confirm that is intended.
        statecolum = F.when(es_df['couponcodevalue'].cast('int') == 0.0, 0).when(
            es_df['couponcodevalue'].cast('int') != 0.0, 1).alias('state')
        # Order receivable amount (paid amount plus coupon value) — assumes
        # orderamount excludes the coupon discount; verify upstream schema.
        orderamo = (es_df['couponcodevalue']+es_df['orderamount']).alias('orderamo')
        # Keep only the columns needed for the aggregation.
        es_df1 = es_df.select(statecolum,'couponcodevalue','memberid','orderamount', orderamo, 'ordersn')

        # Per-member aggregates.
        # NOTE(review): despite its alias, 'sumdiscount' sums orderamo
        # (coupon + orderamount), i.e. the total receivable, not the total
        # coupon amount — every ratio below inherits that definition;
        # confirm this matches the intended PSM formula.
        discountcnt = F.sum(es_df1['state']).alias('discountcnt')
        purchasecnt = F.count(es_df1['state']).alias('purchasecnt')
        sumdiscount = F.sum(es_df1['orderamo']).alias('sumdiscount')
        sumorderamount = F.sum(es_df1['orderamount']).alias('sumorderamount')
        # Group by member id and aggregate.
        es_df2 = es_df1.groupBy('memberid').agg(discountcnt, purchasecnt, sumdiscount, sumorderamount)

        # Drop members who never used a discount (also avoids a
        # divide-by-zero in avgdiscount_rate below).
        esdf3 = es_df2.select(es_df2['memberid'], es_df2['discountcnt'], es_df2['purchasecnt'], es_df2['sumdiscount'],
                             es_df2['sumorderamount']).where(es_df2['discountcnt'] != 0)

        # The three component ratios of the PSM score.
        discountcnt_rate = (esdf3['discountcnt'] / esdf3['purchasecnt']).alias('discountcnt_rate')
        avgdiscount_rate = ((esdf3['sumdiscount'] / esdf3['discountcnt']) / (
                esdf3['sumorderamount'] / esdf3['purchasecnt'])).alias('avgdiscount_rate')
        sumdiscount_rate = (esdf3['sumdiscount'] / esdf3['sumorderamount']).alias('sumdiscount_rate')

        # Project member id plus the three ratios.
        esdf4: DataFrame = esdf3.select(esdf3['memberid'], discountcnt_rate, avgdiscount_rate, sumdiscount_rate)
        esdf4.show()

        # Composite PSM score: plain sum of the three ratios.
        psm = (esdf4['discountcnt_rate'] + esdf4['avgdiscount_rate'] + esdf4['sumdiscount_rate']).alias('psm')
        esdf5 = esdf4.select(esdf4['memberid'], psm)
        esdf5.show()

        # Assemble the single psm column into the feature vector KMeans needs.
        vectdf = VectorAssembler().setInputCols(['psm']).setOutputCol('feature').transform(esdf5)
        # vectdf.show()

        # Cluster members into 5 groups by PSM score (fixed seed, 2 iterations).
        kMeans: KMeans = KMeans() \
            .setK(5) \
            .setSeed(10) \
            .setMaxIter(2) \
            .setFeaturesCol('feature') \
            .setPredictionCol('predictstr')
        model: KMeansModel = kMeans.fit(vectdf)

        # Attach the predicted cluster id ('predictstr') to each member.
        resultdf = model.transform(vectdf)
        resultdf.show()

        # Cluster centers: one 1-d vector per cluster.
        center = model.clusterCenters()

        # Build a (cluster id, center value) DataFrame from the centers.
        dict1 = {}
        for i in range(len(center)):
            dict1[i] = float(center[i][0])

        list2 = [[k, v] for (k, v) in dict1.items()]
        centerdf = self.spark.createDataFrame(list2, ['predict', 'center'])

        # Sort clusters by center value, highest PSM first.
        centersortrdd = centerdf.rdd.sortBy(lambda x: x[1], ascending=False)


        # temprdd = centersortrdd.union(self.spark.parallelize([]))
        # Pair the sorted clusters with the five-level tag rows positionally.
        # NOTE(review): RDD.zip requires both sides to have the same number
        # of partitions AND elements, and repartition(1) does not guarantee
        # a stable element order — this pairing is fragile; confirm five_df
        # has exactly 5 rows in the intended (descending-PSM) order.
        unionrdd = centersortrdd.repartition(1).map(lambda x: x).zip(five_df.rdd.repartition(1))

        # cluster id -> tag id mapping, collected to the driver.
        fivedict = unionrdd.map(lambda row: (row[0][0], row[1][0])).collectAsMap()
        # print(fivedict)

        # Translate each member's cluster id into its tag id via a UDF.
        newdf: DataFrame = resultdf.select(resultdf['memberid'].alias('userId'),
                                           udf(lambda x: fivedict[x], returnType=StringType())(
                                               resultdf['predictstr']).alias('tagsId'))
        newdf.show()

        return newdf
if __name__ == '__main__':
    # Build the model for tag id 51 and run the BaseModel pipeline.
    price_model = PriceSenmodel(51)
    price_model.execute()
