from __future__ import print_function

from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql import SparkSession, Row
from pyspark import SparkConf
from pyspark.sql.types import StructType, StructField, StringType
import pyspark.sql.functions as F
import pyspark.sql.types as T

import sys
import time
import json
import argparse


def parse_args():
    """Parse command-line options for this job.

    Returns:
        argparse.Namespace with:
            project_name: Hive database / project prefix (default "gxhtk_dev").
            calc_date: partition date to process, YYYY-MM-DD (default "2022-06-21").
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--project_name', type=str, default="gxhtk_dev",
                     help='项目名')
    cli.add_argument('--calc_date', type=str, default="2022-06-21",
                     help='数据执行当天')
    return cli.parse_args()


# mapPartitions worker function
def predict_func(part):
    """Extract features for one RDD partition of question rows.

    For each row, parses the JSON in ``row["question_info"]``; rows whose
    subjectCode is "02" and whose first phaseCodes entry is "04" are run
    through ContentPreprocess and emitted as tab-joined text Rows.

    Args:
        part: iterator of Rows, each carrying a JSON string column
              ``question_info`` (assumed to hold keys subjectCode,
              phaseCodes, id, content — TODO confirm against upstream table).

    Returns:
        list of Row(value=str) — one "\\t"-joined record per matching question.
    """
    # Imported inside the function so the dependency is resolved on the
    # executor, not on the driver.
    from dep_lib import ContentPreprocess as pre

    results = []
    for idx, record in enumerate(part, start=1):
        # Log progress every 100 rows.
        if idx % 100 == 0:
            print("{} 数据预测进度：{}".format(time.time(), idx))

        info = json.loads(record["question_info"])
        # Only subject "02" / phase "04" questions are processed.
        if info["subjectCode"] != "02" or info["phaseCodes"][0] != "04":
            continue

        feature = pre.process_question_content(info["content"])
        line = "\t".join(["04", "02", info["id"], feature])
        results.append(Row(value=line))

    return results


if __name__ == '__main__':
    args = parse_args()

    # Build the SparkSession; Hive support is required for spark.sql and
    # the partition DDL below.
    conf = SparkConf() \
        .setAppName("simcse_02_04")
    spark = SparkSession.builder.config(conf=conf).enableHiveSupport().getOrCreate()

    # Load question payloads (topic_id -> question_info JSON).
    # in_df = spark.read.parquet(f"/project/{args.project_name}/{args.project_name}/db/dwd/dwd_ai_ability_topic_theme_incr/part={args.calc_date}")
    in_df = spark.sql(f"SELECT topic_id,question_info FROM {args.project_name}.dwd_ai_ability_topic_theme_incr")
    # Load the ids to keep. BUG FIX: the original query projected only
    # tk_topic_id, so the following .where() on subject_code/phase_code could
    # not resolve those columns and raised an AnalysisException — project them
    # here so the filter works, then drop them again with .select().
    # id_df = spark.read.parquet(f"/project/{args.project_name}/{args.project_name}/db/dwd/dwd_ai_ability_storage_topic_incr/part={args.calc_date}")
    id_df = spark.sql(f"SELECT tk_topic_id, subject_code, phase_code FROM {args.project_name}.dwd_ai_ability_storage_topic_incr WHERE part <= '{args.calc_date}'").where("subject_code='02' and phase_code='04'").select("tk_topic_id").toDF("topic_id").distinct()

    # Run feature extraction over the matching questions.
    schema = T.StructType([T.StructField("value", T.StringType(), True)])
    in_df.show()  # NOTE(review): debug output — consider removing in production
    # repartition(1) funnels everything through a single executor; presumably
    # deliberate (model/dep_lib loaded once) — TODO confirm it scales.
    df = in_df.join(id_df, ["topic_id"]).repartition(1).rdd.mapPartitions(predict_func).toDF(schema)

    # Write the features as plain text under the target partition path.
    df.write.mode("overwrite").text(f"/project/{args.project_name}/{args.project_name}/db/dwd/dwd_photo_search_feature_incr/part={args.calc_date}/feature_type=photosearch_topic_simcse_128_02_04")

    # Re-register the partition in the Hive metastore (drop + add so reruns
    # of the same calc_date are idempotent).
    partition_spec = f"(part='{args.calc_date}', feature_type='photosearch_topic_simcse_128_02_04')"
    spark.sql(f"alter table {args.project_name}.dwd_photo_search_feature_incr drop if exists partition {partition_spec}")
    spark.sql(f"alter table {args.project_name}.dwd_photo_search_feature_incr add partition {partition_spec}")

    spark.stop()
