# -*- coding: utf-8 -*- 
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql import SparkSession, functions
from pyspark import SparkConf
from pyspark.sql import Row
import pyspark.sql.functions as F
import pyspark.sql.types as T
import pandas as pd
import argparse
import torch
import time
import json
import sys
import os
import functools
import pickle


# Global configuration.
# Limit intra-op parallelism: each Spark task should use a single PyTorch
# thread, since parallelism comes from partitions, not from torch.
torch.set_num_threads(1)
# Output schema for predict_func's (topic_id, result) tuples; "result" is
# the prediction dict serialized as a JSON string.
schema = T.StructType([T.StructField("topic_id", T.StringType(), True),
                       T.StructField("result", T.StringType(), True)
                       ])

def parse_args(argv=None):
    """Parse command-line options for the prediction job.

    Args:
        argv: Optional list of argument strings. ``None`` (the default)
            falls back to ``sys.argv[1:]``, preserving the behavior of
            every existing ``parse_args()`` call site.

    Returns:
        argparse.Namespace with ``project_name``, ``calc_date``,
        ``custom_part_num``, ``subject_code`` and ``phase_code``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--project_name', help='项目名',
                        default="gxhtk_dev", type=str)
    # BUG FIX: help text previously read '项目名' (project name), a
    # copy-paste error from --project_name; this option is the
    # calculation date.
    parser.add_argument('--calc_date', help='计算日期',
                        default="2022-06-21", type=str)
    parser.add_argument('--custom_part_num', help='自定义分区数',
                        default=200, type=int)
    parser.add_argument('--subject_code', help='学科code',
                        default="02", type=str)
    parser.add_argument('--phase_code', help='学段code',
                        default="03", type=str)
    return parser.parse_args(argv)

# Resolved at import time so PHASE_CODE is captured in predict_func's
# closure and shipped to the executors with the task.
PHASE_CODE = parse_args().phase_code
# Phase code "05" is mapped to False; presumably MathAnswerExtract treats a
# falsy phase_code as "no specific phase" — TODO confirm with the engine.
if(PHASE_CODE=="05"):
    PHASE_CODE = False

# 预测函数
def predict_func(part):
    """Run answer-extraction inference over one RDD partition.

    Parameters
    ----------
    part : iterator of Row
        Each row carries a ``question_info`` JSON string describing one
        question (exact schema defined upstream — not visible here).

    Yields
    ------
    tuple(str, str)
        ``(topic_id, result_json)`` pairs matching the module-level
        ``schema``; the result dict is serialized with
        ``ensure_ascii=False``.
    """
    import ctypes
    # Force-load libgcc before torch does native work on the executor;
    # presumably works around a dlopen-ordering issue on the cluster
    # image — TODO confirm this hack is still required.
    libgcc_s = ctypes.CDLL('libgcc_s.so.1')
    print("\n执行预测")
    os.environ["KMP_WARNINGS"] = "off"
    # Best-effort chdir: a reused worker process may already be inside
    # ./model, in which case the directory no longer exists relative
    # to the cwd.
    try:
        os.chdir("./model")
    except FileNotFoundError:
        pass
    sys.path.insert(0, "./")
    from engine.dureader.answer_extract_engine import MathAnswerExtract
    # One torch thread per task: parallelism comes from the partitions.
    torch.set_num_threads(1)

    # Load the model once per partition, not once per row.
    print(f"\n加载 {PHASE_CODE} 模型")
    aeb_engine = MathAnswerExtract(phase_code=PHASE_CODE)

    # Stream results instead of materializing the whole partition in a
    # list: mapPartitions accepts any iterable, and large partitions no
    # longer risk exhausting executor memory. The per-row debug print of
    # the full question dict was removed (it flooded executor logs).
    for row in part:
        row_dict = json.loads(row["question_info"])
        res_dic = aeb_engine.predict([row_dict])[0]
        yield (res_dic["topic_id"],
               json.dumps(res_dic, ensure_ascii=False))




if "__main__" == __name__:
    conf = SparkConf() \
        .setAppName("pyspark_anchor_predict_1049_test") \
        .set("spark.sql.execution.arrow.enabled", "true")
    spark = SparkSession.builder.config(conf=conf).enableHiveSupport().getOrCreate()
    spark.sparkContext.setLogLevel("error")

    args = parse_args()

    

    # topic_theme = spark.sql(
    #     "SELECT topic_id as tk_topic_id, question_info FROM {}.dwd_ai_ability_topic_theme_incr WHERE part = '{}'".format(
    #         "gxhtk_pro", args.calc_date))

    # topic_ids = spark.sql(
    #     "SELECT tk_topic_id FROM {}.dwd_ai_ability_storage_topic_incr WHERE part = '{}' and correcting='1' and subject_code = '{}' and phase_code = '{}'".format(
    #         "gxhtk_pro", args.calc_date, args.subject_code, args.phase_code))
    # print("数据量：{}".format(topic_ids.count()))


    id_df = spark.sql(
        f"""
                select
                topic_id,
                question_info
                from
                (
                    select
                    topic_id,
                    question_info,
                    row_number() over(
                        partition by topic_id
                        order by
                        get_json_object(question_info, "$.updateTime") desc
                    ) as rank
                    from
                    {args.project_name}.dwd_ai_ugc_question_sync_di
                )
                where
                rank = "1"
                """)

    # correcting_pre_df = topic_ids.join(topic_theme, "tk_topic_id").cache()


    # correcting_pre_df.select("question_info").repartition(1).write.mode("overwrite") \
    # .json("/project/{}/{}/blwang14/source_test/part={}/subject_code={}/phase_code={}".format(
    #         args.project_name, args.project_name, args.calc_date, args.subject_code, args.phase_code))
    
    correcting_pre_df = spark.read.text("/project/gxhtk_pro/gxhtk_pro/db/dws/dws_zy_except_correct_topic_id_incr_1049/except_correct_topic_id_gxhtk_pro_2023-08")\
        .toDF("question_info").limit(10)

    
    
    correcting_pre_df \
        .repartition(32)\
        .rdd.mapPartitions(predict_func).toDF(schema)\
        .write.mode("overwrite") \
        .json("/project/{}/{}/db/dws/dws_correcting_incr_blwang14/part={}/result".format(
            "gxhtk_dev", "gxhtk_dev", args.calc_date))
        # .json("/project/{}/{}/db/dws/dws_correcting_incr_blwang14/part={}/subject_code={}/phase_code={}".format(
        #     "gxhtk_dev", "gxhtk_dev", args.calc_date, args.subject_code, args.phase_code))