import torch
import argparse
import pandas as pd

from pyspark import SparkConf
import pyspark.sql.types as T
import pyspark.sql.functions as F
from pyspark.sql import SparkSession, functions
from pyspark.sql.functions import pandas_udf, PandasUDFType
import platform
import os
from datetime import datetime
from tqdm import tqdm
import numpy as np

 
# Output schema of the scoring UDF: all three columns are serialized as strings.
_SCHEMA_COLUMNS = ("message", "topic_id", "vector")
schema = T.StructType(
    [T.StructField(col, T.StringType(), True) for col in _SCHEMA_COLUMNS]
)


def parse_args():
    """Parse the command-line options: project name and the run date."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--project_name', type=str, default="gxhtk_dev",
                     help='项目名')
    cli.add_argument('--calc_date', type=str, default="2022-06-21",
                     help='数据执行当天')
    return cli.parse_args()

@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def onehot_0203_128(pandas_df):
    """Score one grouped Spark partition with the local MLflow model.

    The partition is processed in fixed-size slices so each `model.predict`
    call stays small. Rows whose prediction contains a ``pred_emb`` column
    are mapped to (message, topic_id, vector); otherwise ``vector`` is "[]".

    Args:
        pandas_df: pandas DataFrame for one partition group. Assumed to have
            the columns the MLflow model expects (incl. ``message`` and
            ``image_id``) — TODO confirm against the model's signature.

    Returns:
        pandas DataFrame matching the declared output `schema`
        (message, topic_id, vector — all strings).
    """
    torch.set_num_threads(2)
    target_path = "./model/target"

    # Imported lazily so the import happens on the executor, not the driver.
    from mlflow import pyfunc

    model = pyfunc.load_model(target_path)

    # Number of rows handed to the model per predict call.
    sliced_len = 10
    df_len = len(pandas_df)
    print(f"单个partition的条数:{len(pandas_df)}")

    # Bug fix: an empty partition would make np.array_split(df, 0) raise
    # ValueError — return an empty, correctly-shaped frame instead.
    if df_len == 0:
        return pd.DataFrame(columns=["message", "topic_id", "vector"])

    # Exact division -> that many slices; otherwise the remainder rows
    # go into the final slice.
    sliced_num = df_len // sliced_len if df_len % sliced_len == 0 else df_len // sliced_len + 1

    # Seed with an empty frame so the concatenated result always carries
    # the expected columns, as in the original implementation.
    parts = [pd.DataFrame(columns=["message", "topic_id", "vector"])]

    sliced_df = np.array_split(pandas_df, sliced_num)

    # Log progress once per slice.
    for index, part in tqdm(enumerate(sliced_df, 1)):
        print(f"进度：{index}/{sliced_num}开始，数量：{len(part)}")
        result = model.predict(part)
        if "pred_emb" in result.columns.tolist():
            result = result.rename(columns={"image_id": "topic_id", "pred_emb": "vector"})
            result[["vector"]] = result[["vector"]].astype(str)
        else:
            result["vector"] = "[]"
        parts.append(result)

    # Perf fix: one concat at the end instead of the original quadratic
    # concat-inside-the-loop.
    return pd.concat(parts)
    
        
   
def print_time_delta(start_time, end_time):
    """Print the elapsed wall time between two instants as h/m/s.

    Args:
        start_time: ``datetime`` instance or POSIX timestamp (float).
        end_time: ``datetime`` instance or POSIX timestamp (float).
    """
    if isinstance(start_time, float):
        start_time = datetime.fromtimestamp(start_time)
    # Bug fix: the original re-tested start_time here, so a float end_time
    # was never converted and the subtraction below raised TypeError.
    if isinstance(end_time, float):
        end_time = datetime.fromtimestamp(end_time)
    delta = end_time - start_time
    # Bug fix: fold whole days into hours — timedelta.seconds alone drops
    # them, under-reporting any delta of 24h or more.
    hours = delta.days * 24 + delta.seconds // 3600
    minutes = (delta.seconds // 60) % 60
    seconds = delta.seconds % 60
    # Elapsed-time message ("耗时 Xh Ym Zs"), kept in Chinese for log parity.
    print("耗时 {}小时 {}分钟 {}秒".format(hours, minutes, seconds))

if __name__ == '__main__':
    args = parse_args()

    # Enable Arrow so the pandas UDF exchanges data with the JVM efficiently.
    conf = (SparkConf()
            .setAppName("onehot_0203_128")
            .set("spark.sql.execution.arrow.enabled", "true"))
    spark = SparkSession.builder.config(conf=conf).enableHiveSupport().getOrCreate()

    table_name = "dws_photo_search_vector_incr_blwang14_test"
    vector_type = "single_topic_primary_bert_128"
    # Hive-style output partition path:
    # /project/<proj>/<proj>/db/<db-prefix>/<table>/part=<date>/vector_type=<type>
    out_path = "/project/{}/{}/db/{}/{}/part={}/vector_type={}".format(
        args.project_name,
        args.project_name,
        table_name.split("_")[0],
        table_name,
        args.calc_date,
        vector_type,
    )

    in_spark_df = spark.read.json(
        "/project/edu_ai/edu_ai/blwang14/machine_learning/bert_0203_128/train_new.jsonl")

    # Group rows by their physical partition id so each of the 64 partitions
    # is scored by exactly one invocation of the grouped-map pandas UDF.
    res = (in_spark_df
           .repartition(64)
           .groupby(F.spark_partition_id())
           .apply(onehot_0203_128)
           .where("message = 'SUCCESS'")
           .withColumn("subject_code", F.lit("02"))
           .withColumn("phase_code", F.lit("03"))
           .select("subject_code", "phase_code", "topic_id", "vector"))

    res.write.mode("overwrite").option("sep", "\t").csv(out_path)

    #partSql = "(part='{}', vector_type='{}')".format(args.calc_date, vector_type)
    #spark.sql("alter table {}.{} drop if exists partition {}".format(args.project_name, table_name, partSql))
    #spark.sql("alter table {}.{} add partition {}".format(args.project_name, table_name, partSql))

    spark.stop()
