from pyspark.sql import SparkSession, DataFrame
import os
import pyspark.sql.functions as F
from pyspark.sql.types import StringType

from cn.itcast.tags.bean.ESMeta import ruleToESMeta

# 0. Set process environment variables required by PySpark:
#    JDK and Spark installation paths, plus the Python interpreter used by
#    both the driver and the executors (must be the same conda env).
os.environ['JAVA_HOME'] = '/export/server/jdk1.8.0_241/'
os.environ['SPARK_HOME'] = '/export/server/spark'
os.environ['PYSPARK_PYTHON'] = '/root/anaconda3/envs/pyspark_env/bin/python'
os.environ['PYSPARK_DRIVER_PYTHON'] = '/root/anaconda3/envs/pyspark_env/bin/python'


@F.udf(returnType=StringType())
def merge_tags(new_tags, old_tags):
    """Merge two comma-separated tag-id strings, de-duplicating tags.

    Used to combine the newly computed tags with the tags already stored
    in the ES result index for the same user.

    :param new_tags: comma-separated tag ids from the current run (or None)
    :param old_tags: comma-separated tag ids already stored (or None)
    :return: merged comma-separated tag-id string; if one side is None the
             other side is returned unchanged.
    """
    # 1. If old_tags is missing, the new tags are the full result.
    if old_tags is None:
        return new_tags
    # 2. If new_tags is missing, keep the existing tags.
    if new_tags is None:
        return old_tags
    # 3. Both present: split, concatenate, and de-duplicate.
    merged = str(new_tags).split(",") + str(old_tags).split(",")
    # dict.fromkeys de-duplicates while preserving first-seen order, so the
    # output string is deterministic across runs (set() iteration order is
    # arbitrary and produced unstable tag strings in ES).
    return ','.join(dict.fromkeys(merged))


class BaseApp(object):
    """Template base class for a user-profile tag-computation job.

    The workflow (see :meth:`execute`) is:
      1. read the tag-definition table from MySQL;
      2. parse the four-level tag's rule into an ESMeta object;
      3. read the business data from ES according to that rule;
      4. read the five-level (leaf) tags under the four-level tag;
      5. call :meth:`compute` (overridden by subclasses) to produce new tags;
      6. merge with the previously stored tags and (optionally) write back.
    """

    def __init__(self, four_id):
        """Create the SparkSession and remember the four-level tag id.

        :param four_id: primary key of the four-level tag in tbl_basic_tag;
                        its `rule` column drives the whole job.
        """
        self.spark = SparkSession.builder \
            .master("local[*]") \
            .appName("test") \
            .config("spark.sql.shuffle.partitions", 4) \
            .getOrCreate()
        self.four_id = four_id

    def read_from_mysql(self):
        """Read the full tag-definition table (tbl_basic_tag) from MySQL."""
        input_df = self.spark.read.jdbc(
            url="jdbc:mysql://up01:3306/tfec_tags",
            table="tbl_basic_tag",
            properties={'user': 'root', 'password': '123456'})
        return input_df

    def rule_to_es_meta(self, input_df):
        """Extract the four-level tag's rule string and parse it to ESMeta.

        :param input_df: DataFrame of tbl_basic_tag from read_from_mysql
        :return: ESMeta object describing the ES index/nodes/fields to read
        """
        # four_id comes from our own code, not user input, so the f-string
        # predicate is acceptable here.
        rule_df = input_df.where(f'id = {self.four_id}').select('rule')
        rule = rule_df.first()['rule']
        esMeta = ruleToESMeta(rule)
        return esMeta

    def read_from_es(self, esMeta):
        """Read the business data from ES as described by esMeta.

        `es.mapping.date.rich=False` keeps date fields as plain strings
        instead of rich date objects.
        """
        es_df = self.spark.read.format("es") \
            .option("es.resource", esMeta.esIndex) \
            .option("es.nodes", esMeta.esNodes) \
            .option("es.read.field.include", esMeta.selectFields) \
            .option("es.mapping.date.rich", "False") \
            .load()
        return es_df

    def get_five_tags(self, input_df):
        """Return the five-level (leaf) tags whose parent is four_id."""
        five_df = input_df.where(f'pid = {self.four_id}').select('id', 'rule')
        return five_df

    def compute(self, es_df, five_df):
        """Hook: compute the new user->tags DataFrame.

        Subclasses must override this and return a DataFrame with columns
        `userId` and `tagsId` (as consumed by merge_new_df_and_old_df).
        """
        pass

    def read_old_df_from_es(self, esMeta):
        """Read the previously stored tag results from the ES result index."""
        old_df = self.spark.read \
            .format("es") \
            .option("es.resource", "tfec_userprofile_result") \
            .option("es.nodes", esMeta.esNodes) \
            .load()
        return old_df

    def merge_new_df_and_old_df(self, new_df, old_df):
        """Left-join new tags onto old tags by userId and merge tag strings.

        Left join keeps users that only appear in the new results; the
        merge_tags UDF handles the NULL old side.
        """
        result_df = new_df.join(other=old_df,
                                on=new_df['userId'] == old_df['userId'],
                                how='left') \
            .select(new_df['userId'].cast(StringType()),
                    merge_tags(new_df['tagsId'], old_df['tagsId']).alias("tagsId"))
        return result_df

    def save_result_df_to_es(self, result, esMeta):
        """Write the merged result to ES, upserting by userId.

        `es.mapping.id=userId` makes userId the document id, so append
        mode overwrites each user's previous document.
        """
        result.write \
            .mode("append") \
            .format("es") \
            .option("es.resource", "tags_result") \
            .option("es.nodes", esMeta.esNodes) \
            .option("es.mapping.id", "userId") \
            .save()

    def close(self):
        """Stop the SparkSession."""
        self.spark.stop()

    def execute(self):
        """Run the full tag-computation pipeline (template method)."""
        input_df = self.read_from_mysql()
        esMeta = self.rule_to_es_meta(input_df)
        es_df = self.read_from_es(esMeta)
        five_df = self.get_five_tags(input_df)
        new_df = self.compute(es_df, five_df)
        try:
            old_df = self.read_old_df_from_es(esMeta=esMeta)
            result = self.merge_new_df_and_old_df(new_df, old_df)
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit). On the first run the result
            # index does not exist yet, so reading it raises and we skip
            # the merge.
            print("--------------首次运行，跳过合并----------------")
            result = new_df
        result.show()
        # self.save_result_df_to_es(result, esMeta)
        self.close()
