# -*- coding: utf-8 -*- 
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql import SparkSession, functions
from pyspark import SparkConf
from pyspark.sql import Row
import pyspark.sql.functions as F
import pyspark.sql.types as T
import pandas as pd
import argparse
import torch
import time
import json
import sys
import os
import functools
import pickle


# Global configuration: cap torch at one intra-op thread so each Spark
# executor slot does not oversubscribe CPU cores during inference.
torch.set_num_threads(1)
# Output schema of predict_func: one row per topic, with the model result
# serialized as a JSON string.
schema = T.StructType([T.StructField("topic_id", T.StringType(), True),
                       T.StructField("result", T.StringType(), True)
                       ])

def parse_args(argv=None):
    """Parse command-line options for the prediction job.

    Args:
        argv: optional list of argument strings; when None (the default,
              preserving the original behavior) argparse reads sys.argv[1:].
              Passing an explicit list makes the function unit-testable.

    Returns:
        argparse.Namespace with project_name, calc_date, custom_part_num,
        subject_code and phase_code.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--project_name', help='项目名',
                        default="gxhtk_dev", type=str)
    # BUG FIX: help text was a copy-paste of '项目名' (project name);
    # this flag is the calculation date.
    parser.add_argument('--calc_date', help='计算日期',
                        default="2022-06-21", type=str)
    parser.add_argument('--custom_part_num', help='自定义分区数',
                        default=200, type=int)
    parser.add_argument('--subject_code', help='学科code',
                        default="02", type=str)
    parser.add_argument('--phase_code', help='学段code',
                        default="03", type=str)
    return parser.parse_args(argv)

PHASE_CODE = parse_args().phase_code

def predict_func(part):
    """Run answer-extraction inference over one RDD partition.

    Args:
        part: iterator of Spark Rows. Each row's ``question_info`` is read
              via ``.values[0]`` as in the original code — presumably a
              pandas Series cell holding a JSON string; TODO confirm against
              the upstream query.

    Returns:
        pandas.DataFrame with columns ['topic_id', 'result'], where
        ``result`` is the model output serialized as JSON.
    """
    import ctypes
    # Pre-load libgcc so later lazy dlopen calls inside worker threads do
    # not fail on minimal executor images.
    libgcc_s = ctypes.CDLL('libgcc_s.so.1')
    print("\n执行预测")
    # Model assets are resolved relative to ./model on the executor.
    os.chdir("./model")
    sys.path.insert(0, "./")
    from engine.dureader.answer_extract_engine import MathAnswerExtract
    torch.set_num_threads(1)

    # Load the model for the configured phase.
    print(f"\n加载 {PHASE_CODE} 模型")
    aeb_engine = MathAnswerExtract(phase_code=PHASE_CODE)

    start = time.time()

    # BUG FIX: DataFrame.append in a loop is O(n^2) and was removed in
    # pandas 2.0; accumulate plain tuples and build the frame once.
    records = []
    for row in part:
        print(row.question_info.values[0].strip())
        row_dict = json.loads(row.question_info.values[0].strip())
        res_dic = aeb_engine.predict([row_dict])[0]
        records.append((res_dic['topic_id'],
                        json.dumps(res_dic, ensure_ascii=False)))
    df_res = pd.DataFrame(records, columns=['topic_id', 'result'])

    end = time.time()
    print(f'cost {end - start}s inside')

    # Free the model before returning to keep executor memory down.
    del aeb_engine

    return df_res




if "__main__" == __name__:
    conf = SparkConf() \
        .setAppName("pyspark_anchor_predict")
    spark = SparkSession.builder.master("local").config(conf=conf).enableHiveSupport().getOrCreate()
    spark.sparkContext.setLogLevel("error")

    args = parse_args()

    # Topic texts (JSON payloads) for the partition date.
    topic_theme = spark.sql(
        "SELECT topic_id as tk_topic_id, question_info FROM {}.dwd_ai_ability_topic_theme_incr WHERE part = '{}'".format(
            args.project_name, args.calc_date))

    # Topic ids flagged for correcting, restricted to subject/phase.
    topic_ids = spark.sql(
        "SELECT tk_topic_id FROM {}.dwd_ai_ability_storage_topic_incr WHERE part = '{}' and correcting='1' and subject_code = '{}' and phase_code = '{}'".format(
            args.project_name, args.calc_date, args.subject_code, args.phase_code))
    print("数据量：{}".format(topic_ids.count()))

    correcting_pre_df = topic_ids.join(topic_theme, "tk_topic_id")

    correcting_pre_df.show()

    # BUG FIX: the original called mapPartitions(predict_func) twice (once
    # for show, once for write), loading the model and running inference
    # twice per partition. Compute once, persist, then show and write.
    result_df = correcting_pre_df \
        .rdd.mapPartitions(predict_func).toDF(schema) \
        .persist()

    result_df.show(False)

    # BUG FIX: the output path hard-coded "gxhtk_dev" twice instead of the
    # configurable --project_name (whose default is the same value, so the
    # default behavior is unchanged).
    result_df.write.mode("overwrite") \
        .parquet("/project/{}/{}/db/dws/dws_correcting_incr/part={}/subject_code={}/phase_code={}".format(
            args.project_name, args.project_name, args.calc_date, args.subject_code, args.phase_code))

    result_df.unpersist()