from pyspark.sql import SparkSession, DataFrame
import os
import pyspark.sql.functions as F
from pyspark.sql.types import StringType


from cn.itcast.tag.bean.ESMeta import ruleToESMeta


"""
-------------------------------------------------
   Description :	TODO：基类重构
   SourceFile  :	BaseModel
   Author      :	itcast team
-------------------------------------------------
"""




# Runtime environment for the Spark driver and executors (cluster-specific
# paths; must be set before the SparkSession is created below).
os.environ['JAVA_HOME'] = '/export/server/jdk1.8.0_241/'
os.environ['SPARK_HOME'] = '/export/server/spark'
# Driver and workers must point at the same conda interpreter so that
# serialized Python closures deserialize against identical libraries.
os.environ['PYSPARK_PYTHON'] = '/root/anaconda3/envs/pyspark_env/bin/python'
os.environ['PYSPARK_DRIVER_PYTHON'] = '/root/anaconda3/envs/pyspark_env/bin/python'




# UDF: merge a newly computed tag string with the previously stored one.
@F.udf(returnType=StringType())
def merge_tags(new_tags, old_tags):
    """Merge two comma-separated tag-id strings, dropping duplicates.

    :param new_tags: tags produced by the current run (may be None).
    :param old_tags: tags already stored in Elasticsearch (may be None).
    :return: the non-None argument when the other side is None; otherwise
        the deduplicated union, comma-joined in sorted order.
    """
    # If either side is missing, the other side is the full answer.
    if old_tags is None:
        return new_tags
    if new_tags is None:
        return old_tags

    # Union of both tag lists.  sorted() makes the result deterministic:
    # plain set iteration order varies with Python hash randomization,
    # which would make repeated runs persist spuriously different strings
    # for identical tag sets.
    merged = set(str(new_tags).split(",")) | set(str(old_tags).split(","))
    return ','.join(sorted(merged))




# Template base class shared by all tagging models.
class BaseModel(object):
    """Template-method base class for a user-profile tagging job.

    Subclasses override :meth:`compute`; :meth:`execute` drives the full
    pipeline: read tag rules from MySQL, load source data from
    Elasticsearch, compute the new tags, merge them with previously stored
    tags, and write the result back to Elasticsearch.
    """

    def __init__(self, fourId):
        """Create the SparkSession and remember the four-level tag id.

        :param fourId: primary key of the four-level tag row in
            ``tbl_basic_tag`` whose rule drives this job.
        """
        # Keep the session as an instance attribute so every pipeline
        # step below can reuse it.
        self.spark = SparkSession \
            .builder \
            .master("local[2]") \
            .appName("SparkSQLAppName") \
            .config("spark.sql.shuffle.partitions", 4) \
            .getOrCreate()
        self.fourId = fourId

    def read_from_mysql(self):
        """Read the full tag-definition table from MySQL.

        :return: DataFrame over ``tbl_basic_tag``.
        """
        input_df = self.spark.read.jdbc(url='jdbc:mysql://up01:3306/tfec_tags',
                                        table='tbl_basic_tag',
                                        properties={'user': 'root', 'password': '123456'})
        return input_df

    def rule_to_es_meta(self, input_df):
        """Extract the four-level tag's rule and parse it into an ESMeta.

        :param input_df: DataFrame returned by :meth:`read_from_mysql`.
        :return: ESMeta describing the source ES index/fields.
        """
        # The four-level tag row holds the connection rule string.
        four_df = input_df.where(f'id = {self.fourId}').select("rule")
        rule = four_df.first()['rule']
        esMeta = ruleToESMeta(rule)
        return esMeta

    def read_from_es(self, esMeta):
        """Load the source data described by *esMeta* from Elasticsearch.

        :param esMeta: connection metadata produced by :meth:`rule_to_es_meta`.
        :return: DataFrame restricted to ``esMeta.selectFields``.
        """
        es_df = self.spark.read.format("es") \
            .option("es.resource", esMeta.esIndex) \
            .option("es.nodes", esMeta.esNodes) \
            .option("es.read.field.include", esMeta.selectFields) \
            .option("es.mapping.date.rich", "False") \
            .load()
        return es_df

    def get_five_tags(self, input_df):
        """Select the five-level tag rows (children of the four-level tag).

        :param input_df: DataFrame returned by :meth:`read_from_mysql`.
        :return: DataFrame with columns ``id`` and ``rule``.
        """
        five_df: DataFrame = input_df.where(f"pid = {self.fourId}").select("id", "rule")
        return five_df

    def compute(self, es_df, five_df):
        """Compute the new tag DataFrame; must be overridden by subclasses.

        :param es_df: source data from :meth:`read_from_es`.
        :param five_df: five-level tag rules from :meth:`get_five_tags`.
        :return: DataFrame with ``userId`` and ``tagsId`` columns.
        """
        pass

    def read_old_df_from_es(self, esMeta):
        """Read the previously persisted tag results from Elasticsearch.

        :param esMeta: only ``esMeta.esNodes`` is used; the index is fixed.
        :return: DataFrame over the ``tags_result`` index.
        """
        old_df = self.spark.read \
            .format("es") \
            .option("es.resource", "tags_result") \
            .option("es.nodes", esMeta.esNodes) \
            .load()
        return old_df

    def merge_new_df_and_old_df(self, new_df, old_df):
        """Left-join new tags onto old tags and merge the tag strings.

        :param new_df: freshly computed tags (``userId``, ``tagsId``).
        :param old_df: previously stored tags (``userId``, ``tagsId``).
        :return: DataFrame with ``userId`` (string) and merged ``tagsId``.
        """
        result_df = new_df.join(other=old_df,
                                on=new_df['userId'] == old_df['userId'],
                                how='left') \
            .select(new_df['userId'].cast(StringType()),
                    merge_tags(new_df['tagsId'], old_df['tagsId']).alias("tagsId"))
        return result_df

    def save_result_df_to_es(self, result_df, esMeta):
        """Upsert the result into the ``tags_result`` index.

        ``es.mapping.id`` = ``userId`` makes the write idempotent per user.

        :param result_df: merged result DataFrame.
        :param esMeta: only ``esMeta.esNodes`` is used.
        """
        result_df.write \
            .mode("append") \
            .format("es") \
            .option("es.resource", "tags_result") \
            .option("es.nodes", esMeta.esNodes) \
            .option("es.mapping.id", "userId") \
            .save()

    def close(self):
        """Stop the SparkSession and release its resources."""
        self.spark.stop()

    def execute(self):
        """Run the full pipeline, always stopping Spark on the way out."""
        # try/finally guarantees close() runs even if a step fails,
        # so a crashed job does not leave the SparkSession running.
        try:
            # Tag definitions come from MySQL.
            input_df = self.read_from_mysql()
            # Parse the four-level tag's rule into ES connection metadata.
            esMeta = self.rule_to_es_meta(input_df)
            # Source data to be tagged.
            es_df = self.read_from_es(esMeta)
            # Five-level tag rules for the subclass's compute().
            five_df = self.get_five_tags(input_df)
            # Subclass-specific tagging logic.
            new_df = self.compute(es_df, five_df)
            try:
                # Merge with previously stored results.
                old_df = self.read_old_df_from_es(esMeta=esMeta)
                result_df = self.merge_new_df_and_old_df(new_df=new_df, old_df=old_df)
            except Exception:
                # First run: the result index does not exist yet, so
                # skip the merge.  Narrowed from a bare `except:`, which
                # also swallowed KeyboardInterrupt/SystemExit.
                print("--------------首次运行，跳过合并----------------")
                result_df = new_df
            # Persist the final tags.
            self.save_result_df_to_es(result_df, esMeta)
        finally:
            self.close()