import os

import numpy as np
import pyspark.sql.functions as F
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession, types as T
# Wildcard import kept: code below uses the bare `udf` name from it.
from pyspark.sql.functions import *  # noqa: F403

from tag.base.baseModel import BaseModel

# 0. Environment variables required by the PySpark runtime (JVM + worker python).
os.environ['JAVA_HOME'] = '/export/server/jdk1.8.0_241/'
os.environ['SPARK_HOME'] = '/export/server/spark'
os.environ['PYSPARK_PYTHON'] = '/root/anaconda3/envs/pyspark_env/bin/python'
os.environ['PYSPARK_DRIVER_PYTHON'] = '/root/anaconda3/envs/pyspark_env/bin/python'

# TODO 0. Prepare the Spark development environment (duplicated across tag models).
_builder = SparkSession.builder
_builder = _builder.appName("TfecProfile")
_builder = _builder.master("local[*]")
spark = _builder.getOrCreate()
sc = spark.sparkContext

class Active(BaseModel):
    """Activity tag model based on RFE-style metrics.

    Per user, the weblog DataFrame is aggregated into three metrics
    (days since last visit, page-view count, distinct-page count), each
    metric is bucketed into a 1-5 score, the score vectors are clustered
    with KMeans, and each cluster is mapped to a five-level tag id.
    """

    def compute(self, es_df, five_df):
        """Compute the activity tag id for every user.

        :param es_df: weblog DataFrame; must contain the columns
            ``global_user_id``, ``log_time`` and ``loc_url``.
        :param five_df: five-level tag DataFrame whose first column is the
            tag id; its rows are matched positionally to the clusters
            (sorted by center sum), so it must have one row per cluster.
        :return: DataFrame with columns ``userId`` and ``tagsId``.
        """
        # 1. Aggregate the raw RFE metrics per user.
        days_agg = F.datediff(F.current_timestamp(), F.max("log_time")).alias("days")
        num_agg = F.count("loc_url").alias("num")
        timenum_agg = F.countDistinct("loc_url").alias("timenum")

        tmp_df = es_df.groupBy("global_user_id").agg(days_agg, num_agg, timenum_agg)
        tmp_df.show(10, truncate=False)

        # 2. Bucket every metric into a 1-5 score (higher = more active).
        days_score = F.when(F.col("days").between(0, 15), 5) \
            .when(F.col("days").between(16, 30), 4) \
            .when(F.col("days").between(31, 45), 3) \
            .when(F.col("days").between(46, 60), 2) \
            .when(F.col("days") > 60, 1).alias("days")

        # NOTE(review): num == 560 falls into the second branch (score 4)
        # because the first branch is strictly '> 560', and 561..760 already
        # scored 5 never reach the 'between(560, 760)' branch; thresholds
        # kept as-is since they look like business rules — confirm.
        num_score = F.when(F.col("num") > 560, 5) \
            .when(F.col("num").between(560, 760), 4) \
            .when(F.col("num").between(400, 559), 3) \
            .when(F.col("num").between(250, 399), 2) \
            .when(F.col("num") < 250, 1).alias("num")

        # NOTE(review): the boundary values 300/250/200/50 overlap between
        # adjacent branches; the earlier branch wins — confirm intended.
        timenum_score = F.when(F.col("timenum") > 300, 5) \
            .when(F.col("timenum").between(250, 300), 4) \
            .when(F.col("timenum").between(200, 250), 3) \
            .when(F.col("timenum").between(50, 200), 2) \
            .when(F.col("timenum") < 50, 1).alias("timenum")

        result_df = tmp_df.select(
            tmp_df['global_user_id'].alias("userId"),
            days_score, num_score, timenum_score)
        # result_df.show(10)

        # 3. Assemble the three scores into one feature vector per user.
        assembler = VectorAssembler() \
            .setInputCols(["days", "num", "timenum"]) \
            .setOutputCol("feature")
        feature_df = assembler.transform(result_df)

        # 4. Cluster users into 4 activity groups.
        # Local import kept to match the original layout; `kmeans` no longer
        # shadows the KMeans class name.
        from pyspark.ml.clustering import KMeans
        kmeans = KMeans().setK(4).setSeed(10).setMaxIter(10) \
            .setFeaturesCol("feature").setPredictionCol("predict")
        model = kmeans.fit(feature_df)

        # Predict a cluster id for every user.
        result = model.transform(feature_df)
        result.show()

        # 5. Rank clusters by the sum of their center coordinates:
        # a larger sum means higher combined scores, i.e. a more active group.
        centers = model.clusterCenters()
        center_sums = [float(np.sum(c)) for c in centers]
        print("sum cluer", center_sums)

        # cluster index -> center sum (renamed from `dict`, which shadowed
        # the builtin).
        index_to_sum = {i: s for i, s in enumerate(center_sums)}
        print("from dict", index_to_sum)

        center_rows = [[k, v] for k, v in index_to_sum.items()]
        print(center_rows)

        center_df = spark.createDataFrame(center_rows, ['predict', 'center'])
        center_df.show()

        # Single partition + descending sort so the later zip is positional.
        center_rdd = center_df.rdd.repartition(1) \
            .sortBy(lambda row: row[1], ascending=False)
        print("sort partition")
        center_rdd.foreach(lambda row: print(row))

        # 6. Positionally zip ranked clusters with the five-level tag rows.
        # The original `union(sc.parallelize([]))`, extra `repartition(1)`
        # and identity `map` were no-ops and have been removed.
        union_rdd = center_rdd.zip(five_df.rdd.repartition(1))
        union_rdd.foreach(lambda row: print(row))

        # 7. Build the cluster-id -> tag-id mapping on the driver.
        five_dict = union_rdd \
            .map(lambda pair: (pair[0][0], pair[1][0])) \
            .collectAsMap()
        print(five_dict)

        # 8. Replace each user's cluster id with the matching tag id.
        predict_to_tag = F.udf(lambda p: five_dict[p], returnType=T.StringType())
        new_df = result.select(
            result['userId'],
            predict_to_tag(result['predict']).alias('tagsId'))
        new_df.show()

        return new_df


if __name__ == '__main__':
    # 46 is presumably the four-level tag id handled by this model — confirm
    # against the tag metadata table.
    Active(46).execute()