import os
import sys

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config import *
from base_class.base_utils import spend_time
from aws_model.athena_submit import submit
from base_class.inspect_logging import print_to_log
from project_utils.get_fridge_mappings import get_duplicate
import json


# Source and target tables
# client = boto3.client('athena')


@spend_time("一个SQL耗时： ")
def insert_fridge_mapping(is_hour, mapping_json, category, materials, p_date):
    """Build and submit one Athena INSERT...SELECT copying fridge data for a
    single partition date into the target table.

    For partition dates before 2022-01-13 rows come from the history table
    (with a literal 'history' source column); from that date onward they come
    from the HBase-backed table — hourly or daily variant per ``is_hour`` —
    with ``sn`` and ``timeStamp`` decoded from the HBase rowkey.

    Args:
        is_hour: truthy → use the hourly source/target tables (only consulted
            for dates on/after 2022-01-13).
        mapping_json: JSON string mapping source column names to
            ``{"value": target_column, "data_type": cast_type}`` dicts.
        category: value for the ``category = '...'`` WHERE filter.
        materials: list of material code strings for the ``material IN (...)``
            filter.
        p_date: partition date string, e.g. ``"2022-01-13"`` (string
            comparison selects the branch).
    """
    mappings = json.loads(mapping_json)
    # Shared ``material IN ('a','b',...)`` fragment (leading/trailing spaces
    # are significant: the query text before it ends with WHERE).
    material_filter = f''' material in ({"'" + "','".join(materials) + "'"}) '''
    if p_date < "2022-01-13":
        source_table = fridge_history_table
        target_table = fridge_target_table
        target_columns = [f'"{v["value"]}"' for v in mappings.values()]
        source_columns = [f'"COALESCE(TRY(CAST ({k} AS {mappings[k]["data_type"]})),null)"' for k in mappings]
        query = f"""INSERT INTO {target_table} ({",".join(target_columns)}, source)
        SELECT {",".join(source_columns)},
        'history' as source FROM {source_table} WHERE """
        query += material_filter
    else:
        if is_hour:
            source_table = fridge_hbase_table_hour
            target_table = fridge_target_table_hour
        else:
            source_table = fridge_hbase_table
            target_table = fridge_target_table
        # Split the mapping into unique keys and keys that collide on the
        # target column name (helper returns both groupings).
        only_result, duplicate_result = get_duplicate(mappings)
        # Target column names: unique keys' mapped names plus the collided
        # keys themselves.
        target_columns = [f'"{v["value"]}"' for v in only_result.values()] + list(duplicate_result)
        # Source expressions for the non-colliding columns.
        source_columns = [f'"COALESCE(TRY(CAST ({k} AS {only_result[k]["data_type"]})),null)"' for k in
                          only_result.keys()]
        # Source expressions for colliding columns: prefer value2, fall back
        # to value1.
        duplicate_sql = ",".join([f"""COALESCE(TRY(CAST (CASE WHEN {duplicate_result[i]["value2"]} is null THEN
         {duplicate_result[i]["value1"]} ELSE {duplicate_result[i]["value2"]} END  AS
         {duplicate_result[i]["data_type"]})),null) as {i}""" for i in duplicate_result])
        # One query template instead of two near-identical copies: append the
        # duplicate expressions only when present (SQL text is unchanged
        # versus building each variant separately).
        select_exprs = ",".join(source_columns)
        if duplicate_sql:
            select_exprs += f", {duplicate_sql}"
        # timeStamp: rowkey bytes 8..26 hold (Long.MAX_VALUE - epoch); invert
        # and truncate to seconds before formatting in Asia/Shanghai time.
        query = f"""INSERT INTO {target_table} (sn, timeStamp, {",".join(target_columns)}) SELECT
            substr(rowkey, 27, 24) as sn ,
            date_format(from_unixtime(cast(substr(cast(9223372036854775807-cast(substr(rowkey, 8, 19) as bigint) as varchar),1,10) as bigint), 'Asia/Shanghai'), '%Y-%m-%d %H:%i:%s' ) as timeStamp,
            {select_exprs}  FROM {source_table} WHERE"""
        query += material_filter
    query += f"and category = '{category}' "
    # Column expressions were wrapped in double quotes during construction;
    # strip them here.  NOTE(review): this removes EVERY double quote in the
    # query, not just the wrappers — assumes mapping values never contain one.
    query = query.replace('"', "")
    query += f"and p_date = '{p_date}'"
    result = submit(query)
    # BUGFIX: original did ``materials + ": "``, which raises TypeError when
    # ``materials`` is a list (as the IN-clause join above implies).
    print_to_log(p_date, f"{materials}: ", result)
