#!/usr/bin/env python




# @desc : customer-value mining tag development
# K-Means clustering over user order-behavior data to grade customer value
# input : user order data (ES), value-level rule table
# output: mapping from user id to value tag

import os
import numpy as np
from pyspark import SparkContext
from pyspark.ml.clustering import KMeans, KMeansModel
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql import functions as F
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
from BaseModel import BaseModel

# 1 - local (Windows) development paths, kept for reference:
# SPARK_HOME = 'D:\\ProgramCj\\spark-2.4.8-bin-hadoop2.7'
# PYSPARK_PYTHON = 'D:\\ProgramCJ\\Python\\python'
# 2 - server paths
SPARK_HOME = '/export/server/spark'
PYSPARK_PYTHON = '/root/anaconda3/envs/pyspark_env/bin/python'
# Export so Spark picks up the intended installation and worker interpreter.
os.environ['SPARK_HOME'] = SPARK_HOME
os.environ["PYSPARK_PYTHON"] = PYSPARK_PYTHON

# Module-level SparkSession shared by the whole job.
# NOTE(review): master is hard-coded to local[*] — confirm this is intended
# for the server deployment rather than a leftover from local testing.
spark = SparkSession \
    .builder \
    .appName("TfecProfile") \
    .master("local[*]") \
    .getOrCreate()
sc: SparkContext = spark.sparkContext


# NOTE(review): removed a leftover block that re-assigned SPARK_HOME and
# PYSPARK_PYTHON to local Windows development paths here, *after* the
# SparkSession was already created.  Those assignments clobbered the server
# paths exported above (/export/server/spark, the pyspark_env interpreter)
# and would point any python process spawned later at a non-existent
# D:\ProgramCj\... location on the server.


class Valuemodel(BaseModel):
    # Customer-value tag model: scores each member on recency / monetary /
    # frequency, clusters the scores with K-Means, then maps every cluster
    # to a value-level tag taken from the rule table.
    def getTagId(self):
        # Tag id this model is keyed on.
        # NOTE(review): __main__ constructs Valuemodel(38) while this
        # returns 37 — confirm against the tag metadata which id is correct.
        return 37
    def compute(self, esdf: DataFrame, fivedf: DataFrame):
        """
        Core computation.

        :param esdf: user order data fetched from ES; expects at least the
                     columns memberid, finishtime (unix seconds), orderamount
                     and ordersn
        :param fivedf: value-level rule table; presumably one row per value
                       level with the tag id in the first column, ordered
                       from highest to lowest value — verify, the zip below
                       silently depends on that ordering
        :return: DataFrame with the columns userId and tagsId
        """

        # Three RFM-style indicators per member:
        #   days      = days since the member's last finished order (recency)
        #   sumamount = total order amount (monetary)
        #   cntsn     = number of orders (frequency)

        # Reference date is shifted back 980 days (~3 years), presumably so
        # the historical sample data still produces sensible recency values
        # — TODO confirm against the actual data set.
        finishtimecolum = F.datediff(F.date_sub(F.current_date(), 980),
                                     F.from_unixtime(F.max(esdf['finishtime']))).alias('days')

        orderamountcolum = F.sum(esdf['orderamount']).alias('sumamount')
        ordersncolum = F.count(esdf['ordersn']).alias('cntsn')

        # One row per member carrying the three aggregated indicators.
        esdf1: DataFrame = esdf.groupby(esdf['memberid']).agg(finishtimecolum, orderamountcolum, ordersncolum)

        # Bucket each indicator into a 1..5 score so the features share one
        # scale before clustering; smaller 'days' (more recent) scores higher.
        # .otherwise(0) catches values matched by no bucket (e.g. nulls).
        finishtimescore = F.when((esdf1['days'] < 20), 5) \
            .when((esdf1['days'] >= 20) & (esdf1['days'] < 40), 4) \
            .when((esdf1['days'] >= 40) & (esdf1['days'] < 60), 3) \
            .when((esdf1['days'] >= 60) & (esdf1['days'] < 80), 2) \
            .when((esdf1['days'] >= 80), 1) \
            .otherwise(0) \
            .alias('days')

        orderamountscore = F.when((esdf1['sumamount'] >= 850000), 5) \
            .when((esdf1['sumamount'] >= 500000) & (esdf1['sumamount'] < 850000), 4) \
            .when((esdf1['sumamount'] >= 350000) & (esdf1['sumamount'] < 500000), 3) \
            .when((esdf1['sumamount'] >= 100000) & (esdf1['sumamount'] < 350000), 2) \
            .when((esdf1['sumamount'] < 100000), 1) \
            .otherwise(0) \
            .alias('sumamount')

        ordersnscore = F.when((esdf1['cntsn'] >= 440), 5) \
            .when((esdf1['cntsn'] >= 330) & (esdf1['cntsn'] < 440), 4) \
            .when((esdf1['cntsn'] >= 220) & (esdf1['cntsn'] < 330), 3) \
            .when((esdf1['cntsn'] >= 110) & (esdf1['cntsn'] < 220), 2) \
            .when((esdf1['cntsn'] < 110), 1) \
            .otherwise(0) \
            .alias('cntsn')

        # NOTE(review): selects esdf['memberid'] from esdf1 — Spark resolves
        # it by column name so this works, but esdf1['memberid'] would be the
        # direct reference.
        esdfuni = esdf1.select(esdf['memberid'], finishtimescore, orderamountscore, ordersnscore)

        # Assemble the three scores into a single 'feature' vector column.
        vector = VectorAssembler().setInputCols(['days', 'sumamount', 'cntsn']).setOutputCol('feature')
        vecesdf = vector.transform(esdfuni)
        # K-Means: 7 clusters (one per value level); maxIter=2 keeps the job
        # cheap; 'predictstr' will hold the integer cluster id despite its name.
        kMeans: KMeans = KMeans() \
            .setK(7) \
            .setSeed(10) \
            .setMaxIter(2) \
            .setFeaturesCol('feature') \
            .setPredictionCol('predictstr')
        # Train the model.
        model: KMeansModel = kMeans.fit(vecesdf)
        # Attach the predicted cluster id to every member row.
        resultdf: DataFrame = model.transform(vecesdf)

        # Cluster centres (list of numpy arrays, one per cluster).
        center = model.clusterCenters()

        # Rank clusters by the sum of their centre coordinates — a higher
        # sum means higher scores across the board, i.e. a more valuable cluster.
        list1 = [float(np.sum(x)) for x in center]

        # Map each cluster index to its centre sum.
        dict1 = {}
        for i in range(len(list1)):
            dict1[i] = list1[i]

        list2 = [[k, v] for (k, v) in dict1.items()]

        # Turn the (cluster id, centre sum) pairs into a DataFrame.
        centerdf: DataFrame = spark.createDataFrame(list2, ['predict', 'center'])

        # Sort clusters by centre sum, descending, in a single partition so
        # the element order is well-defined for the zip below.
        centersortrdd = centerdf.rdd.repartition(1).sortBy(lambda x: x[1], ascending=False)
        print("sort partition")
        centersortrdd.foreach(lambda x:print(x))

        # NOTE(review): union with an empty RDD looks like a no-op —
        # presumably leftover scaffolding; confirm it can be dropped.
        temprdd = centersortrdd.union(sc.parallelize([]))
        # Pair the i-th most valuable cluster with the i-th rule row.
        # Fragile: RDD.zip requires both sides to have the same number of
        # elements per partition, and assumes fivedf has exactly K=7 rows
        # already ordered from highest to lowest value — verify upstream.
        unionrdd = temprdd.repartition(1).map(lambda x: x).zip(fivedf.rdd.repartition(1))
        unionrdd.foreach(lambda x:print(x))

        # {cluster id -> tag id}, pulled to the driver for use inside a udf.
        fivedict = unionrdd.map(lambda row: (row[0][0], row[1][0])).collectAsMap()
        print(fivedict)
        # Final result: one (userId, tagsId) row per member.  The udf will
        # raise KeyError if a prediction is missing from fivedict (cannot
        # happen while K matches the rule-row count — confirm).
        newdf: DataFrame = resultdf.select(resultdf['memberid'].alias('userId'),
                                           udf(lambda x: fivedict[x], returnType=StringType())(resultdf['predictstr']).alias('tagsId'))
        newdf.show()

        # Hand the tag mapping back to BaseModel for persistence.
        return newdf


if __name__ == '__main__':

    # Build the value-tag model and run the full pipeline; BaseModel.execute
    # persists the resulting tags into tags_result.
    model = Valuemodel(38)
    model.execute()