import os
import numpy as np
import pyspark.sql.functions as F
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import StringType
from pyspark.sql import SparkSession, types as T  # NOTE: duplicate SparkSession import; kept for the `types as T` alias
from tag.base.baseModel import BaseModel
# 0. Point the Spark toolchain at the local JDK, the Spark install, and the
#    pyspark conda interpreter (driver and executors use the same Python).
os.environ.update({
    'JAVA_HOME': '/export/server/jdk1.8.0_241/',
    'SPARK_HOME': '/export/server/spark',
    'PYSPARK_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
    'PYSPARK_DRIVER_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
})

# TODO 0. Prepare the Spark session (duplicated across tag models).
spark = (
    SparkSession.builder
    .appName("TfecProfile")
    .master("local[*]")
    .getOrCreate()
)
sc = spark.sparkContext

class CustomerValue(BaseModel):
    """Customer-value (RFM-style) tag model.

    Scores each member on recency / monetary / frequency, clusters the
    score vectors with KMeans, ranks the clusters by center magnitude and
    maps each cluster to a tag id taken row-by-row from ``five_df``.
    """

    def compute(self, es_df, five_df):
        """Compute the per-member customer-value tag.

        :param es_df: order DataFrame with columns ``memberid``,
                      ``finishtime`` (unix seconds), ``orderamount``,
                      ``ordersn``.  # assumed schema — confirm against index
        :param five_df: tag-rule DataFrame; its rows (in stored order) are
                        zipped against the clusters sorted by center sum,
                        so it must contain exactly one row per cluster.
        :return: DataFrame with columns ``userId`` and ``tagsId``.
        """
        # --- R/F/M aggregates per member ----------------------------------
        # NOTE(review): recency is measured against current_date - 980 days,
        # presumably to align an old demo data set with "today" — confirm.
        days_col = F.datediff(F.date_sub(F.current_date(), 980),
                              F.from_unixtime(F.max(es_df['finishtime']))).alias('days')
        amount_col = F.sum(es_df['orderamount']).alias('sumamount')
        count_col = F.count(es_df['ordersn']).alias('cntsn')
        rfm_df = es_df.groupby(es_df['memberid']).agg(days_col, amount_col, count_col)

        # --- Normalize each measure to a 1-5 score (5 = best) -------------
        # `.otherwise(0)` catches NULLs so the vector assembler never sees one.
        days_score = F.when(rfm_df['days'] < 20, 5) \
            .when((rfm_df['days'] >= 20) & (rfm_df['days'] < 40), 4) \
            .when((rfm_df['days'] >= 40) & (rfm_df['days'] < 60), 3) \
            .when((rfm_df['days'] >= 60) & (rfm_df['days'] < 80), 2) \
            .when(rfm_df['days'] >= 80, 1) \
            .otherwise(0).alias('days')

        amount_score = F.when(rfm_df['sumamount'] >= 850000, 5) \
            .when((rfm_df['sumamount'] >= 500000) & (rfm_df['sumamount'] < 850000), 4) \
            .when((rfm_df['sumamount'] >= 350000) & (rfm_df['sumamount'] < 500000), 3) \
            .when((rfm_df['sumamount'] >= 100000) & (rfm_df['sumamount'] < 350000), 2) \
            .when(rfm_df['sumamount'] < 100000, 1) \
            .otherwise(0).alias('sumamount')

        count_score = F.when(rfm_df['cntsn'] >= 440, 5) \
            .when((rfm_df['cntsn'] >= 330) & (rfm_df['cntsn'] < 440), 4) \
            .when((rfm_df['cntsn'] >= 220) & (rfm_df['cntsn'] < 330), 3) \
            .when((rfm_df['cntsn'] >= 110) & (rfm_df['cntsn'] < 220), 2) \
            .when(rfm_df['cntsn'] < 110, 1) \
            .otherwise(0).alias('ordernum')

        # Fixed: select columns from rfm_df itself rather than reaching back
        # into es_df (cross-DataFrame column references resolve fragilely).
        score_df = rfm_df.select(rfm_df['memberid'], days_score, amount_score, count_score)

        # --- Assemble the three scores into a feature vector --------------
        assembler = VectorAssembler(inputCols=['days', 'sumamount', 'ordernum'],
                                    outputCol='feature')
        feature_df = assembler.transform(score_df)

        # --- Cluster with KMeans ------------------------------------------
        from pyspark.ml.clustering import KMeans
        # Fixed: do not rebind the name `KMeans` (it shadowed the class).
        kmeans = KMeans().setK(7).setSeed(10).setMaxIter(2) \
            .setFeaturesCol('feature').setPredictionCol('predictstr')
        model = kmeans.fit(feature_df)
        result = model.transform(feature_df)

        # --- Rank clusters by the sum of their center coordinates ----------
        # Higher sum = higher R/F/M scores = more valuable segment.
        center_sums = [[i, float(np.sum(center))]
                       for i, center in enumerate(model.clusterCenters())]
        center_df = spark.createDataFrame(center_sums, ['predict', 'center'])
        center_rdd = center_df.rdd.repartition(1).sortBy(lambda row: row[1],
                                                         ascending=False)

        # --- Pair clusters (best first) with five_df's tag rows ------------
        # Removed the no-op union with an empty RDD that was here before.
        # NOTE(review): zip requires both RDDs to hold the same number of
        # elements per partition — five_df must have exactly 7 rows; confirm.
        paired = center_rdd.repartition(1).map(lambda row: row) \
            .zip(five_df.rdd.repartition(1))

        # {cluster_id: tag_id} from (center_row, five_row) pairs.
        five_dict = paired.map(lambda pair: (pair[0][0], pair[1][0])).collectAsMap()

        # --- Translate each member's cluster into its tag id ---------------
        # five_dict[x] raises KeyError for an unmapped cluster id, surfacing
        # as a task failure — every id 0..6 is covered when the zip succeeds.
        tag_udf = F.udf(lambda x: five_dict[x], returnType=T.StringType())
        new_df = result.select(result['memberid'].alias('userId'),
                               tag_udf(result['predictstr']).alias('tagsId'))
        new_df.show()

        return new_df


if __name__ == '__main__':
    # 38 presumably is this tag's id in the tag metadata table — confirm.
    customer_value_model = CustomerValue(38)
    customer_value_model.execute()