from pyspark.sql import SparkSession
import os
import pyspark.sql.functions as F
from pyspark.sql.types import StringType


from cn.itcast.tag.bean.ESMeta import ruleToESMeta


"""
-------------------------------------------------
   Description :	Gender tag model: reads tag definitions from MySQL,
                    matches user gender data from Elasticsearch against
                    the 5-level tag rules, merges the result with the
                    historical tags, and writes it back to Elasticsearch.
   SourceFile  :	GenderModel
   Author      :	itcast team
-------------------------------------------------
"""


# Runtime environment for PySpark: JDK, Spark home, and the Python
# interpreter used by both the driver and the workers.
os.environ.update({
    'JAVA_HOME': '/export/server/jdk1.8.0_241/',
    'SPARK_HOME': '/export/server/spark',
    'PYSPARK_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
    'PYSPARK_DRIVER_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
})


# Build the SparkSession: local mode on 2 cores, with a small shuffle
# partition count suited to this small dataset.
spark = (
    SparkSession.builder
    .master("local[2]")
    .appName("SparkSQLAppName")
    .config("spark.sql.shuffle.partitions", 4)
    .getOrCreate()
)


# Load the tag-definition table from MySQL.  tbl_basic_tag holds both the
# 4-level tag (id = 4, whose `rule` describes the ES data source) and its
# 5-level children (pid = 4, matched against the user data further down).
input_df = spark.read.jdbc(
    url='jdbc:mysql://up01:3306/tfec_tags',
    table='tbl_basic_tag',
    properties={'user': 'root', 'password': '123456'},
)

# Extract the `rule` string of the 4-level tag (id = 4).  Guard against a
# missing row: `first()` returns None in that case, and subscripting it
# would raise an opaque TypeError instead of a clear configuration error.
rule_row = input_df.where('id = 4').select("rule").first()
if rule_row is None:
    raise ValueError("tag id = 4 not found in tbl_basic_tag; cannot build ES metadata")
rule = rule_row['rule']

# Parse the rule into an ESMeta object (index, nodes, fields to read).
esMeta = ruleToESMeta(rule)


# Pull the source user data out of Elasticsearch, limited to the fields
# the rule asks for.
es_df = (
    spark.read.format("es")
    .option("es.resource", esMeta.esIndex)
    .option("es.nodes", esMeta.esNodes)
    .option("es.read.field.include", esMeta.selectFields)
    .load()
)


# The 5-level tags (pid = 4): each row maps a rule value (a gender code)
# to the tag id that should be assigned for it.
five_df = input_df.where("pid = 4").select("id", "rule")


# Assign a tag to every user: left-join the ES rows against the 5-level
# tags on gender == rule, keeping users whose gender matches no rule
# (their tagsId comes out null).
new_df = (
    es_df.join(five_df, on=es_df['gender'] == five_df['rule'], how='left')
    .select(
        es_df['id'].alias("userId"),
        five_df['id'].alias("tagsId"),
    )
)


# Previously computed tag results, to be merged with the new ones.
old_df = (
    spark.read.format("es")
    .option("es.resource", "tags_result")
    .option("es.nodes", esMeta.esNodes)
    .load()
)
old_df.printSchema()
old_df.show()


# UDF: merge the freshly computed tag ids with the ones already stored,
# producing a single comma-separated string.
@F.udf(returnType=StringType())
def merge_tags(new_tags, old_tags):
    """Merge two comma-separated tag-id strings into one.

    Either argument may be None (user has no previous tags / got no new
    tag); in that case the other side is returned unchanged.  Duplicates
    are removed.  dict.fromkeys is used instead of set() so the merged
    string keeps first-seen order and is deterministic across runs —
    set() iteration order would make repeated runs write differently
    ordered (but equal) tag strings to Elasticsearch.
    """
    if old_tags is None:
        return new_tags
    if new_tags is None:
        return old_tags

    combined = str(new_tags).split(",") + str(old_tags).split(",")
    # dict.fromkeys de-duplicates while preserving insertion order.
    return ','.join(dict.fromkeys(combined))




# Combine the fresh tag assignments with the stored ones: left-join on
# userId and merge the two tagsId columns through the UDF.
result_df = (
    new_df.join(old_df, on=new_df['userId'] == old_df['userId'], how='left')
    .select(
        new_df['userId'].cast(StringType()),
        merge_tags(new_df['tagsId'], old_df['tagsId']).alias("tagsId"),
    )
)
result_df.printSchema()
result_df.show()


# Write the merged results back to Elasticsearch; es.mapping.id makes
# userId the document id, so each user keeps a single result document.
result_df.write \
    .mode("append") \
    .format("es") \
    .option("es.resource", "tags_result") \
    .option("es.nodes", esMeta.esNodes) \
    .option("es.mapping.id", "userId") \
    .save()


# Release Spark resources.
spark.stop()
