'''
@Project --> File: TFECUserProfile -> BaseModelAbstract
@IDE: PyCharm
@Author: Burke
@Date: 2023/6/16
'''
import abc

from pyspark import SparkContext
from pyspark.sql import SparkSession,functions as F, DataFrame
import os

# TODO 0. Prepare the Spark development environment
from pyspark.sql.functions import udf

from index_development.ESMate.ESUtil import ESMate

# Point Spark at the cluster installation and force both the driver and the
# executors to use the same Anaconda interpreter (avoids Python version-mismatch
# errors between driver and workers).
os.environ['SPARK_HOME'] = '/export/server/spark'
os.environ['PYSPARK_PYTHON'] = '/root/anaconda3/bin/python3'
os.environ['PYSPARK_DRIVER_PYTHON'] = '/root/anaconda3/bin/python3'


# Module-level SparkSession shared by every helper below and by BaseModel.execute().
# allowPrecisionLoss=false keeps full precision in decimal SQL arithmetic.
spark = SparkSession \
    .builder \
    .appName('BaseModelAbstract') \
    .master('local[*]')\
    .config('spark.sql.decimalOperations.allowPrecisionLoss','false')\
    .getOrCreate()

# Underlying SparkContext; passed through to subclasses' compute().
sc = spark.sparkContext


def getMysqlDF():
    """Load the tag-metadata table (tbl_basic_tag) from MySQL as a DataFrame."""
    jdbc_url = "jdbc:mysql://up01:3306/tags_new?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC&useSSL=false&user=root&password=123456"
    meta_df = spark.read.jdbc(jdbc_url, "tbl_basic_tag")
    print("======================= 2- 读取MySQL中的数据 =======================")
    # Peek at a few rows for debugging before handing the DataFrame back.
    meta_df.limit(5).show(truncate=False)
    return meta_df


def getEsMeta(id, mysqlDF):
    """Parse the 4th-level tag's `rule` column into an ESMate metadata object.

    The rule is stored as '##'-separated key=value pairs, e.g.
    'inType=Elasticsearch##esNodes=up01:9200##esIndex=...##selectFields=...'.

    :param id: primary key of the 4th-level tag row in tbl_basic_tag
    :param mysqlDF: DataFrame over tbl_basic_tag (see getMysqlDF)
    :return: ESMate instance describing the ES source to read
    """
    print("======================= 3-1 读取指定标签中的数据 =======================")
    filter_df: DataFrame = mysqlDF.select(mysqlDF.rule).where(f'id=={id}')
    filter_df.show(truncate=False)

    print("======================= 3-2 根据指定标签中的rule第一步解析 =======================")
    split_df: DataFrame = filter_df.select(F.split("rule", '##').alias('kv'))
    split_df.show(truncate=False)

    # Pull the single rule row back to the driver as a list of 'key=value' strings.
    kv_pairs: list = split_df.collect()[0].kv

    print("======================= 3-3 根据指定标签中的rule第二步解析 =======================")
    # split('=', 1): split only on the FIRST '=', so a value that itself
    # contains '=' (e.g. an ES query string) no longer makes dict() blow up
    # with "dictionary update sequence element has length 3".
    result: dict = dict(map(lambda x: x.split('=', 1), kv_pairs))
    print(result)

    esmate = ESMate.get_dict(result)
    print(esmate)
    return esmate


def getEsMetadata(esMeta):
    """Read the source user data from the Elasticsearch index described by esMeta."""
    print('======================= 4- 根据解析的rule读取ES数据 =======================')
    es_options = {
        'es.nodes': f'{esMeta.esNodes}',
        'es.resource': f'{esMeta.esIndex}/{esMeta.esType}',
        'es.index.read.missing.as.empty': 'yes',
        'es.query': '?q=*',
        'es.write.operation': 'upsert',
        'es.mapping.date.rich': 'false',
        # Only pull the fields this tag model actually needs.
        'es.read.field.include': f'{esMeta.selectFields}',
    }
    esDF: DataFrame = spark.read.format("es").options(**es_options).load()

    esDF.limit(5).show(truncate=False)
    return esDF


def getFiveDF(pid, mysqlDF):
    """Fetch the 5th-level tag rows (id, rule) whose parent id equals pid."""
    print('======================= 5- 读取和年龄相关的五级标签 =======================')
    five_df: DataFrame = mysqlDF.where(f'pid=={pid}').select(mysqlDF.id, mysqlDF.rule)
    five_df.limit(5).show()
    return five_df


def getOldDF(esMeta):
    """Read the previously written (userId, tagsId) pairs from the ES result index."""
    reader = spark.read.format("es") \
        .option("es.nodes", f"{esMeta.esNodes}") \
        .option("es.resource", f"tfec_userprofile_result/{esMeta.esType}") \
        .option("es.index.read.missing.as.empty", "yes") \
        .option("es.query", "?q=*") \
        .option("es.read.field.include", "userId,tagsId")
    old_df: DataFrame = reader.load()
    print('======================= 6-2 OldDF =======================')
    old_df.limit(5).show(truncate=False)
    return old_df

@udf
def mergeUDF(newTagId: str, oldTagId: str) -> str:
    """Merge the new and old tagsId CSV strings into one de-duplicated CSV string.

    Either side may be None (a user present in only one of the joined
    DataFrames); in that case the other side is returned unchanged.

    Fix: the merged tags are sorted before joining.  Joining a bare set()
    produced a different tag order on every run, so identical tag sets
    compared unequal as strings and caused needless upsert churn in ES.
    """
    if newTagId is None:
        return oldTagId
    if oldTagId is None:
        return newTagId
    merged = set(str(newTagId).split(",")) | set(str(oldTagId).split(","))
    return ','.join(sorted(merged))


def extendNewDFAndOldDF(newDF, oldDF):
    """Merge the newly computed tags with those already stored in ES.

    Fix: uses a FULL OUTER join.  The original default inner join silently
    dropped any user present in only one side — existing ES users who got no
    new tag this run, and brand-new users with no history.  mergeUDF already
    handles the None that an outer join produces on the missing side, which
    is exactly what its None branches are for.

    :param newDF: DataFrame(userId, tagsId) produced by compute()
    :param oldDF: DataFrame(userId, tagsId) read back from ES (see getOldDF)
    :return: DataFrame(userId, tagsId) with both tag sets merged per user
    """
    print('======================= 6-2 result =======================')
    result: DataFrame = newDF.join(oldDF, on=newDF.userId == oldDF.userId, how='full') \
        .select(F.coalesce(newDF.userId, oldDF.userId).alias('userId'),
                mergeUDF(newDF.tagsId, oldDF.tagsId).alias("tagsId"))
    result.orderBy(result.userId).show()
    return result

def EsWrite(result, esMeta):
    """Upsert the merged (userId, tagsId) result into the ES result index."""
    print('======================= 7 写入ES =======================')
    writer = result.orderBy(result.userId).write.format("es")
    writer = writer.option("es.nodes", f"{esMeta.esNodes}")
    writer = writer.option("es.resource", f"tfec_userprofile_result/{esMeta.esType}")
    # userId is the document id, so re-runs update documents instead of duplicating.
    writer = writer.option("es.mapping.id", "userId")
    writer = writer.option("es.mapping.name", "userId:userId,tagsId:tagsId")
    writer = writer.option("es.write.operation", "upsert")
    writer.mode("append").save()



class BaseModel(metaclass=abc.ABCMeta):
    """Template-method base class for user-profile tag models.

    Subclasses supply the 4th-level tag id (getTagId) and the matching
    logic (compute); execute() runs the shared pipeline:
    MySQL metadata -> rule parsing -> ES read -> 5th-level tags -> compute().
    """

    @abc.abstractmethod
    def getTagId(self):
        """Return the tbl_basic_tag id of this model's 4th-level tag."""
        pass

    @abc.abstractmethod
    def compute(self, esDF: "DataFrame", fiveDF: "DataFrame",
                sc: "SparkContext", spark: "SparkSession"):
        """Match esDF rows against the 5th-level tag rules.

        Fix: the original annotations had sc/spark swapped — sc is the
        SparkContext and spark the SparkSession (module level sets
        ``sc = spark.sparkContext``).  String annotations keep them lazy.

        Must return a DataFrame with columns (userId, tagsId).
        """
        pass

    def execute(self):
        """Run the shared tagging pipeline (steps 6-8, the ES write-back, are disabled)."""
        # 1. Read tag metadata from MySQL.
        mysqlDF = getMysqlDF()

        # 2. Parse this model's 4th-level tag rule into ES connection metadata
        #    (the tag id differs per model).
        esMeta = getEsMeta(self.getTagId(), mysqlDF)

        # 3. Read the source data from ES according to the parsed rule.
        esDF = getEsMetadata(esMeta)

        # 4. Read the 5th-level tags (rows whose pid is this tag's id).
        fiveDF = getFiveDF(self.getTagId(), mysqlDF)

        # 5. Model-specific matching: produce DataFrame(userId, tagsId).
        newDF: DataFrame = self.compute(esDF, fiveDF, sc, spark)

        # 6-8. Merge with existing ES data and write back — currently disabled.
        # try:
        #     oldDF: DataFrame = getOldDF(esMeta)
        # except:
        #     EsWrite(newDF, esMeta)
        #     return
        #
        # result = extendNewDFAndOldDF(newDF, oldDF)
        # EsWrite(result, esMeta)
        #
        # spark.stop()



