import os
from pyspark import SparkContext
from pyspark.ml.clustering import KMeans, KMeansModel
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql import functions as F
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
from cn.itcast.tag.base.BaseModel import BaseModel

SPARK_HOME = '/export/server/spark'
PYSPARK_PYTHON = '/root/anaconda3/envs/pyspark_env/bin/python3'
# Tell PySpark which Spark installation and Python interpreter to use.
os.environ['SPARK_HOME'] = SPARK_HOME
os.environ["PYSPARK_PYTHON"] = PYSPARK_PYTHON

# TODO 0. Prepare the Spark development environment (duplicated across tag models).
spark = SparkSession.builder.appName("TfecProfile").master("local[*]").getOrCreate()
sc: SparkContext = spark.sparkContext


# inType=Elasticsearch##esNodes=up01:9200##esIndex=tfec_tbl_orders##esType=_doc##selectFields=memberid,ordersn,orderamount,couponcodevalue
class PriceSenmodel(BaseModel):
    """Price-sensitivity (PSM) tag model.

    Clusters members by how strongly coupon discounts feature in their
    order history (KMeans on a single PSM score) and maps each cluster
    to one of the five-level tag rules supplied by ``fivedf``.
    """

    def getTagId(self):
        """Return the parent (4-level) tag id this model computes under.

        Returns:
            int: tag id 50.
        """
        return 50

    def compute(self, esdf: DataFrame, fivedf: DataFrame):
        """Compute each member's price-sensitivity tag.

        Args:
            esdf: order data with columns memberid, ordersn,
                orderamount, couponcodevalue.
            fivedf: five-level tag rules. Assumed to contain exactly K
                (=5) rows ordered so the first row corresponds to the
                most price-sensitive cluster — TODO confirm upstream.

        Returns:
            DataFrame: one row per member with columns ``userId`` and
            ``tagsId`` (tag id as a string).
        """
        # Peek at the inputs (debug aid).
        esdf.show()
        fivedf.show()

        # PSM is built from three ratios:
        #   1. discount-order ratio = discounted orders / total orders
        #   2. avg-discount ratio   = (discount total / discounted orders)
        #                             / (receivable total / total orders)
        #   3. total-discount ratio = discount total / receivable total

        # Step 1: flag coupon usage per order (state: 1 = coupon used,
        # 0 = no coupon) and compute the receivable amount (paid + discount).
        # FIX: the original when/when chain had no otherwise(), so a null or
        # uncastable couponcodevalue produced a null state that was silently
        # excluded from the count/sum aggregates below; such rows now count
        # as "no coupon".
        # NOTE(review): the int cast means a fractional coupon value below 1
        # is treated as no coupon — assumed couponcodevalue is integral,
        # TODO confirm.
        statecolum = F.when(esdf['couponcodevalue'].cast('int') != 0.0, 1).otherwise(0).alias('state')
        orderamo = (esdf['couponcodevalue'] + esdf['orderamount']).alias('orderamo')
        esdf1 = esdf.select(statecolum, 'couponcodevalue', 'memberid', 'orderamount', orderamo, 'ordersn')

        # Step 2: per-member aggregates.
        discountcnt = F.sum(esdf1['state']).alias('discountcnt')            # discounted orders
        purchasecnt = F.count(esdf1['state']).alias('purchasecnt')          # total orders
        sumdiscount = F.sum(esdf1['couponcodevalue']).alias('sumdiscount')  # total discount amount
        sumorderamount = F.sum(esdf1['orderamo']).alias('sumorderamount')   # total receivable amount
        esdf2 = esdf1.groupby(esdf1['memberid']).agg(discountcnt, purchasecnt, sumdiscount, sumorderamount)

        # Step 3: drop members who never used a coupon — their ratios are
        # meaningless and avgdiscount_rate would divide by zero.
        esdf3 = esdf2.where(esdf2['discountcnt'] != 0)

        # The three sensitivity ratios.
        discountcnt_rate = (esdf3['discountcnt'] / esdf3['purchasecnt']).alias('discountcnt_rate')
        avgdiscount_rate = ((esdf3['sumdiscount'] / esdf3['discountcnt']) / (esdf3['sumorderamount'] / esdf3['purchasecnt'])).alias('avgdiscount_rate')
        sumdiscount_rate = (esdf3['sumdiscount'] / esdf3['sumorderamount']).alias('sumdiscount_rate')

        esdf4: DataFrame = esdf3.select(esdf3['memberid'], discountcnt_rate, avgdiscount_rate, sumdiscount_rate)

        # Step 4: PSM score = sum of the three ratios.
        psm = (esdf4['discountcnt_rate'] + esdf4['avgdiscount_rate'] + esdf4['sumdiscount_rate']).alias('psm')
        esdf5 = esdf4.select(esdf4['memberid'], psm)
        esdf5.show()

        # Step 5: assemble the PSM score into a vector column for KMeans.
        vectdf = VectorAssembler().setInputCols(['psm']).setOutputCol('feature').transform(esdf5)

        # Step 6: cluster members into 5 groups on PSM.
        kMeans: KMeans = KMeans() \
            .setK(5) \
            .setSeed(10) \
            .setMaxIter(2) \
            .setFeaturesCol('feature') \
            .setPredictionCol('predictstr')
        model: KMeansModel = kMeans.fit(vectdf)

        # Step 7: cluster assignment per member.
        resultdf: DataFrame = model.transform(vectdf)

        # Step 8: cluster centers keyed by cluster id.
        center = model.clusterCenters()
        dict1 = {i: float(c[0]) for i, c in enumerate(center)}

        # Sort clusters by center value, descending: most price-sensitive
        # cluster first, matching the assumed order of fivedf.
        list2 = [[k, v] for (k, v) in dict1.items()]
        centerdf: DataFrame = spark.createDataFrame(list2, ['predict', 'center'])
        centersortrdd = centerdf.rdd.sortBy(lambda x: x[1], ascending=False)

        # Step 9: pair sorted clusters with the five-level rules.
        # FIX: removed the original no-op union with an empty RDD and the
        # identity map() before zip().
        # NOTE(review): zip() requires both sides to hold the same number of
        # elements per partition, so fivedf must have exactly K rows; also
        # repartition() does not guarantee order preservation — confirm.
        unionrdd = centersortrdd.repartition(1).zip(fivedf.rdd.repartition(1))

        # Step 10: cluster id -> tag id lookup table.
        fivedict = unionrdd.map(lambda row: (row[0][0], row[1][0])).collectAsMap()

        # Step 11: translate each member's cluster into its tag id.
        newdf: DataFrame = resultdf.select(
            resultdf['memberid'].alias('userId'),
            udf(lambda x: fivedict[x], returnType=StringType())(resultdf['predictstr']).alias('tagsId'))
        newdf.show()

        return newdf

if __name__ == '__main__':
    # NOTE(review): the model is constructed with 51 while getTagId()
    # returns 50 — confirm which value the BaseModel ctor expects.
    price_sen_model = PriceSenmodel(51)
    price_sen_model.execute()