# -*- coding: utf-8 -*- 
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql import SparkSession, functions
from pyspark import SparkConf
from pyspark.sql import Row
import pyspark.sql.functions as F
import pyspark.sql.types as T
import pandas as pd
import argparse
import torch
import time
import json
import sys
import os
import functools
import pickle


# Global runtime configuration: keep torch single-threaded inside each
# executor task so Spark's own parallelism is not oversubscribed.
torch.set_num_threads(1)

# Output schema produced by predict_func: (topic_id, JSON-encoded result).
schema = T.StructType(
    [T.StructField(col, T.StringType(), True) for col in ("topic_id", "result")]
)

def _str2bool(value):
    """Parse a command-line string into a real boolean.

    argparse's ``type=bool`` is a trap: ``bool("False")`` is True because any
    non-empty string is truthy, so ``--is_incr False`` used to enable the
    incremental path.  This helper maps the usual textual spellings onto
    proper booleans and rejects anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ("true", "1", "yes", "y"):
        return True
    if lowered in ("false", "0", "no", "n"):
        return False
    raise argparse.ArgumentTypeError(f"无法解析布尔值：{value}")


def parse_args():
    """Parse the job's command-line arguments.

    Returns:
        argparse.Namespace with project_name, calc_date, custom_part_num,
        subject_code, phase_code and is_incr.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--project_name', help='项目名',
                        default="gxhtk_dev", type=str)
    # help previously said "项目名" — a copy/paste error; this is the date.
    parser.add_argument('--calc_date', help='计算日期',
                        default="2022-06-21", type=str)
    parser.add_argument('--custom_part_num', help='自定义分区数',
                        default=200, type=int)
    parser.add_argument('--subject_code', help='学科code',
                        default="02", type=str)
    parser.add_argument('--phase_code', help='学段code',
                        default="03", type=str)
    # type=bool would turn the string "False" into True; use a real parser.
    parser.add_argument('--is_incr', help='是否为增量',
                        default=False, type=_str2bool)
    return parser.parse_args()

# Resolve the phase code once at import time so executors see a constant.
# "05" is collapsed to False (NOTE(review): presumably a sentinel that the
# model loader interprets as "no specific phase" — confirm in MathAnswerExtract).
_cli_phase_code = parse_args().phase_code
PHASE_CODE = False if _cli_phase_code == "05" else _cli_phase_code

# Partition-level prediction worker (runs inside Spark executors).
def predict_func(part):
    """Run the math answer-extraction model over one RDD partition.

    Args:
        part: iterator of Rows, each carrying a JSON string under
            "question_info".

    Returns:
        list of (topic_id, result_json) tuples matching the module-level
        ``schema``.
    """
    import ctypes
    # Pre-load libgcc so native extensions imported below can resolve it.
    ctypes.CDLL('libgcc_s.so.1')
    print("\n执行预测")
    os.environ["KMP_WARNINGS"] = "off"
    try:
        # NOTE(review): model code is expected under ./model on executors;
        # the chdir is best-effort and skipped when the dir is absent.
        os.chdir("./model")
    except FileNotFoundError:
        pass
    sys.path.insert(0, "./")
    from engine.dureader.answer_extract_engine import MathAnswerExtract
    # One torch thread per task: Spark already parallelizes across partitions.
    torch.set_num_threads(1)

    print(f"\n加载 {PHASE_CODE} 模型")
    aeb_engine = MathAnswerExtract(phase_code=PHASE_CODE)

    # Materialize once so we can report the partition size up front.
    part_list = list(part)
    print(f"单个partition的条数:{len(part_list)}")
    out_data = []
    for count, row in enumerate(part_list, start=1):
        row_dict = json.loads(row["question_info"])
        res_dic = aeb_engine.predict([row_dict])[0]
        out_data.append((res_dic["topic_id"], json.dumps(res_dic, ensure_ascii=False)))
        # Progress heartbeat every 10 rows.
        if count % 10 == 0:
            print(f"已经完成{count}条")

    return out_data


def get_ugc_theme(spark, args):
    """Fetch the latest UGC question body per topic_id from Hive.

    Deduplicates dwd_ai_ugc_question_sync_di by topic_id, keeping the row
    with the newest ``$.updateTime`` inside question_info.

    Args:
        spark: active SparkSession with Hive support.
        args: parsed CLI namespace (uses args.project_name as the database).

    Returns:
        DataFrame with columns (tk_topic_id, question_info).

    Raises:
        RuntimeError: when the SQL fails (the statement is printed first).
        AssertionError: when the result is empty.
    """
    # Build the statement once so the executed SQL and the SQL printed on
    # failure can never drift apart (they were duplicated verbatim before).
    sql = f"""
        select
            topic_id as tk_topic_id,
            question_info
        from
            (
                select
                    topic_id,
                    question_info,
                    row_number() over(
                        partition by topic_id
                        order by
                        get_json_object(question_info, "$.updateTime") desc
                    ) as rank
                from
                    {args.project_name}.dwd_ai_ugc_question_sync_di
            )
        where
            rank = "1"
            """
    try:
        topic_theme = spark.sql(sql)
    except Exception as err:
        # Dump the statement so the failing SQL can be inspected in the log.
        print(sql)
        raise RuntimeError("主动终止,查看sql语句") from err

    # count() is a full Spark action — compute it once, not twice.
    theme_count = topic_theme.count()
    if theme_count == 0:
        # Explicit raise instead of `assert`, which is stripped under -O.
        raise AssertionError("ugc题面为空")
    print(f"ugc全量题面量：{theme_count}")
    return topic_theme

class Theme:
    """Abstract access point for question-bank theme retrieval.

    Concrete subclasses are expected to supply the full-dump implementation.
    """

    @classmethod
    def getAllTheme(cls):
        """Return all themes; subclasses must override."""
        raise NotImplementedError("子类必须实现getAllTheme()方法")

if "__main__" == __name__:
    conf = SparkConf() \
        .setAppName("pyspark_anchor_predict_1049") \
        .set("spark.sql.execution.arrow.enabled", "true")
    spark = SparkSession.builder.config(conf=conf).enableHiveSupport().getOrCreate()
    spark.sparkContext.setLogLevel("error")

    args = parse_args()

    
    date_length = len(args.calc_date.split("-"))
    
    if(date_length==3):
        if(args.is_incr):
            pass
        else:
            #全量大题库的题面获取未实现
            Theme.getAllTheme()
    elif(date_length==4):
        topic_theme = get_ugc_theme(spark,args)
    else:
        raise ValueError(f"非法参数：{args.calc_date}")
    
    # if(args.is_incr):
    #     topic_theme = spark.sql(
    #         "SELECT topic_id as tk_topic_id, question_info FROM {}.dwd_ai_ability_topic_theme_incr WHERE part = '{}'".format(
    #             args.project_name, args.calc_date))
    #     try:
    #         assert(topic_theme.count()!=0)
    #         print(f"dwd_ai_ability_topic_theme_incr在part={args.calc_date}分区不为0")
    #         print(f"使用dwd_ai_ability_topic_theme_incr表资源")
    #     except:
    #         print(f"dwd_ai_ability_topic_theme_incr在part={args.calc_date}分区为0")
    #         print(f"使用dwd_ai_ugc_question_sync_di表资源")
            

    topic_ids = spark.sql(
        "SELECT tk_topic_id FROM {}.dwd_ai_ability_storage_topic_incr WHERE correcting='1' and subject_code = '{}' and phase_code = '{}'".format(
            args.project_name, args.subject_code, args.phase_code))

    if(args.is_incr):
        topic_ids = topic_ids.where(f"part >= '2023-10-17-08' and part <= '{args.calc_date}'")
        # topic_ids = topic_ids.where(f"part <= '{args.calc_date}'")
        
    print("试题数据量：{}".format(topic_ids.count()))

    correcting_pre_df = topic_ids.join(topic_theme, "tk_topic_id")
    print("加载模型前数据量：{}".format(correcting_pre_df.count()))
    correcting_pre_df.show(10)

    correcting_res = correcting_pre_df.repartition(16).rdd.mapPartitions(predict_func).toDF(schema).cache()
    print(f"加载模型后数据量：{correcting_res.count()}")

    correcting_res.show(10)
    
    correcting_res \
        .repartition(1)\
        .write.mode("overwrite") \
        .parquet("/project/{}/{}/db/dws/dws_correcting_incr/part={}/subject_code={}/phase_code={}".format(
            args.project_name, args.project_name, args.calc_date, args.subject_code, args.phase_code))
    
    spark.sql("alter table {}.dws_correcting_incr drop if exists partition (part='{}', subject_code='{}', phase_code='{}')".format(args.project_name, args.calc_date, args.subject_code, args.phase_code))
    spark.sql("alter table {}.dws_correcting_incr add partition (part='{}', subject_code='{}', phase_code='{}')".format(args.project_name, args.calc_date, args.subject_code, args.phase_code))