from pyspark.sql import SparkSession, DataFrame
import os
import pyspark.sql.functions as F
from pyspark.sql.types import StringType
from cn.itcast.tag.bean.ESMeta import ruleToESMeta

"""
-------------------------------------------------
   Description :	TODO：
   SourceFile  :	AgeTag
   Author      :	itcast team
-------------------------------------------------
"""

# Environment needed by Spark: JVM location, Spark home, and the Python
# interpreter for both driver and executors.
_SPARK_ENV = {
    'JAVA_HOME': '/export/server/jdk1.8.0_241/',
    'SPARK_HOME': '/export/server/spark',
    'PYSPARK_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
    'PYSPARK_DRIVER_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
}
os.environ.update(_SPARK_ENV)

# Build a local SparkSession (2 cores); a small shuffle-partition count is
# enough for this single-node tag job.
spark = (
    SparkSession.builder
    .master("local[2]")
    .appName("SparkSQLAppName")
    .config("spark.sql.shuffle.partitions", 4)
    .getOrCreate()
)

# 从MySQL数据库读取数据
index_df = spark.read.jdbc(url='jdbc:mysql://up01:3306/tfec_tags',
                          table='tbl_basic_tag',
                          properties={'user': 'root', 'password': '123456'})
# 筛选id为14的记录，并选取rule列
four_df:DataFrame = index_df.where('id=14').select('rule')
# four_df.printSchema()
# four_df.show(truncate=False)


# 获取rule列的第一个值
rule = four_df.first()['rule']


# 将rule转换为ESMeta对象
esMeta = ruleToESMeta(rule)




# Load the user documents from Elasticsearch as described by esMeta.
_es_reader = (
    spark.read.format("es")
    .option("es.resource", esMeta.esIndex)
    .option("es.nodes", esMeta.esNodes)
    .option("es.read.field.include", esMeta.selectFields)
    .option("es.mapping.date.rich", "False")
)
es_df = _es_reader.load()

# Strip the dashes from birthday (presumably "YYYY-MM-DD" -> "YYYYMMDD" —
# confirm against the index mapping) so it compares lexicographically with
# the child tags' range bounds.
_birthday = F.regexp_replace("birthday", "-", "").alias("birthday")
es_df = es_df.select("id", _birthday)
# es_df.printSchema()
# es_df.show()




# Child tags (pid=14): each `rule` is "start-end"; split it once and expose
# the two bounds as separate columns.
_bounds = F.split("rule", "-")
five_df = index_df.where("pid = 14").select(
    "id",
    _bounds.getItem(0).alias("start"),
    _bounds.getItem(1).alias("end"),
)
# five_df.printSchema()
# five_df.show()




# 将es_df和five_df进行连接，连接条件为es_df的birthday列在five_df的start和end之间
result_df = es_df.join(other=five_df,
           on=es_df['birthday'].between(five_df['start'],five_df['end']),
           how='left').select(es_df['id'].alias("userId"),five_df['id'].alias("tagsId"))
result_df.printSchema()
result_df.show()




# 将结果写入Elasticsearch
result_df.write\
    .mode("append")\
    .format("es")\
    .option("es.resource","tags_result")\
    .option("es.nodes",esMeta.esNodes)\
    .option("es.mapping.id","userId")\
    .save()


# 停止SparkSession
spark.stop()