import json
import logging
import random
import re
import threading
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime

from product_upload.core.basic import get_amazon_us_config_price_strategy, get_prod_price_by_class_name
from product_upload.domain.amazon_us.amazon_us_record import AmazonUsRecord
from product_upload.domain.amazon_us.amazon_us_tag import AmazonUsTag
from product_upload.domain.amazon_us.amazon_us_upload import AmazonUsUpload
from product_upload.domain.basic.basic_product import BasicProduct
from product_upload.util.basic.common_util import sleep_random_duration, send_dingtalk_message, extract_field_number, split_html_tag, split_langer_txt, shuffle_list_except_first_two_elements, remove_sensitive_words
from product_upload.util.basic.file_util import create_folder, copy_and_rename_file_with_row_check, write_to_amazon_template_xlsm, move_files_to_shared_folder
from product_upload.util.basic.mysql_util import db_list_by_page, db_get_one, db_batch_update, db_batch_insert, db_list_by_page_v2
from product_upload.util.basic.openai_util import model_request
from product_upload.util.seller.amazon_us_new_util import get_real_product_type, get_xlsm_data_for_product, get_common_data, get_template_file_path

logger = logging.getLogger(__name__)


def simplified_check_params(_field_map, _rule_map):
    """Normalize paired ``*value``/``*unit`` fields in-place and drop empty fields.

    Steps:
      1. For every field ending in ``unit``, ensure the matching ``value``
         field exists (default 1) and that the unit is one of the allowed
         values from ``_rule_map["valid_values"]``.
      2. For every field ending in ``value`` with no matching ``unit`` field,
         fill a default unit when one is allowed, otherwise drop the value.
      3. Remove all fields whose value is ``None`` or ``""``.

    :param _field_map: field name -> value mapping, mutated in-place.
    :param _rule_map: rule mapping with an optional ``valid_values`` dict
        (field name -> list of allowed values).
    """
    valid_values = _rule_map.get("valid_values", {})
    # Step 1: unit fields -> ensure the value twin exists and the unit is legal.
    unit_fields = [k for k in _field_map.keys() if k.endswith('unit')]
    for unit_field in unit_fields:
        # Swap only the trailing suffix. str.replace('unit', 'value') would
        # also rewrite earlier occurrences, e.g. turning
        # "unit_count#1.unit" into "value_count#1.value".
        value_field = unit_field[:-len('unit')] + 'value'
        # If the value field is missing, fill a default
        if value_field not in _field_map:
            _field_map[value_field] = 1  # default value of 1
        # Validate the unit against the allowed values
        optional_list = valid_values.get(unit_field, [])
        if optional_list and _field_map[unit_field] not in optional_list:
            _field_map[unit_field] = select_val(optional_list)
    # Step 2: value fields -> ensure the unit twin exists, else drop the value.
    value_fields = [k for k in _field_map.keys() if k.endswith('value')]
    for value_field in value_fields:
        unit_field = value_field[:-len('value')] + 'unit'
        if unit_field not in _field_map:
            optional_list = valid_values.get(unit_field, [])
            if optional_list:
                _field_map[unit_field] = select_val(optional_list)
            else:
                # No legal unit available: a bare value is unusable.
                del _field_map[value_field]
    # Step 3: purge empty fields.
    keys_to_remove = [k for k, v in _field_map.items() if v is None or v == ""]
    for key in keys_to_remove:
        del _field_map[key]


def select_val(optional_list_):
    """Pick the preferred unit from a list of allowed values.

    Imperial defaults win when present ("Inches" for sizes, "Pounds" for
    weights); otherwise the first allowed value is returned.
    """
    for preferred in ("Inches", "Pounds"):
        if preferred in optional_list_:
            return preferred
    return optional_list_[0]


# Compiled once: optional sign, then "123", "123.", "123.45" or ".45".
_NUMBER_RE = re.compile(r"^[+-]?(\d+(\.\d*)?|\.\d+)$")


def is_number(s):
    """Return True if ``str(s)`` is a plain decimal number (no exponent notation)."""
    return bool(_NUMBER_RE.match(str(s)))


def gpt_guess_field_value_task(product_information, field_definitions, field_options, result_dict):
    """Ask the model to guess values for the given fields and merge the JSON
    answer into ``result_dict`` (mutated in-place).

    :param product_information: raw product text the model reads from.
    :param field_definitions: field name -> definition/example mapping.
    :param field_options: field name -> list of allowed values (may be empty).
    :param result_dict: destination dict updated with the model's answer.
    """
    guess_sys_prompt = """
Based on the provided 'product information', fill in the values for the fields listed in 'need fill field' according to the following rules:
- For fields describing product characteristics (e.g., material, color, flavor), prioritize extracting the exact value from 'product information', even if it is in 'valid_values'.
- For other fields, if 'valid_values' are provided, select the most suitable value from 'valid_values' based on 'product information'.
- If a field cannot be determined from 'product information':
- If 'valid_values' are available, choose the closest matching value based on product context.
- If no 'valid_values' are provided, use "N/A" for non-numerical fields.
- For numerical fields (e.g., size, weight), provide pure numbers without units; if undetermined, use 1. Units are standardized: inches for size, pounds for weight.
- For range fields (e.g., length_range, width_range), if not directly specified, infer the range based on the product's dimensions and the examples in 'field_definitions'.
- Ensure all fields are filled.
Return the result in JSON format: {"field1": "value1", "field2": "value2", ...}
"""
    # Assemble the user prompt piecewise; valid values are optional.
    prompt_parts = [f'need fill field:\n{json.dumps(list(field_definitions.keys()))}\n']
    if field_options:
        prompt_parts.append(f'Valid_values:\n{json.dumps(field_options)}\n')
    prompt_parts.append(f'field_definitions:\n{json.dumps(field_definitions)}\n')
    prompt_parts.append(f'\nproduct information:\n{product_information}')
    guessed = model_request(True, "".join(prompt_parts), guess_sys_prompt, model="gpt-4o-mini")
    result_dict.update(guessed)


def gpt_title_bullet_desc(product_info, result_dict):
    """Generate title / bullet points / description copy for a product and
    merge the normalized answer into ``result_dict`` (mutated in-place).

    Empty or missing title/bullets/description values are normalized to
    ``None`` so callers can detect an unusable generation in one check.
    """
    prompt_list = get_common_data().get("prompt")
    # Rotate through the configured system prompts to vary the copywriting.
    sys_prompt = random.choice(prompt_list)
    desc_dict = model_request(True, f'product information:\n{product_info}', sys_prompt)
    # `x or None` collapses "", [] and missing keys to None, same as the
    # original per-field falsy checks did.
    desc_dict["product_description#1.value"] = split_html_tag(desc_dict.get("product_description", "")) or None
    desc_dict["item_name#1.value"] = desc_dict.get("product_title") or None
    desc_dict["bullet_points"] = desc_dict.get("bullet_points") or None
    result_dict.update(desc_dict)


# Fill the static base listing fields
def fill_base_field(_db_dict):
    """Extend ``_db_dict`` in-place with the statically derivable Amazon
    listing fields (keywords, dimensions, packaging, compliance defaults).

    Returns ``{}`` (record skipped) when the stored search words do not
    contain the three expected keyword groups; otherwise returns the
    mutated ``_db_dict``.
    """
    search_words_map = json.loads(_db_dict.get('search_words', "{}"))
    if len(search_words_map) != 3:
        return {}
    keywords_next = (search_words_map.get("Primary Keywords")[0:4]
                     + search_words_map.get("Long-tail Keywords")[0:3]
                     + search_words_map.get("Related Keywords")[0:3])
    generic_keywords = split_langer_txt(keywords_next)
    feed_product_type = _db_dict.get('product_type')
    packages = _db_dict.get('packages')
    dimensions = _db_dict.get('dimensions')
    base_field_dict = _db_dict
    pkg_len, pkg_wid, pkg_hgt, pkg_weight = packages[0], packages[1], packages[2], packages[3]

    def _pick(item_val, pkg_val):
        # Item dimensions fall back to the package dimension when missing
        # or equal to the placeholder value 1.
        return item_val if item_val and item_val != 1 else pkg_val

    eff_len = _pick(dimensions[0], pkg_len)
    eff_wid = _pick(dimensions[1], pkg_wid)
    eff_hgt = _pick(dimensions[2], pkg_hgt)
    item_weight = dimensions[3]
    catalog_name = _db_dict.get('catalog_name')
    if not catalog_name:
        # Fall back to the first allowed item_type_keyword for this type.
        valid_values = get_xlsm_data_for_product(feed_product_type).get("valid_values")
        if valid_values:
            catalog_name = valid_values.get("item_type_keyword#1.value", [""])[0]
    # Fixed defaults plus the standard dimension/weight fields.
    base_field_dict.update({
        "language#1.type": "Manual",
        "language#1.value": "English",
        "record_action#1.value": "Full Update",
        "generic_keyword#1.value": generic_keywords,
        "product_type#1.value": feed_product_type,
        "item_type_keyword#1.value": catalog_name,
        "item_package_quantity#1.value": 1,
        "scent#1.value": "Unscented",
        "configuration#1.value": "N/A",
        "flavor#1.value": "N/A",
        "edition#1.value": "Standard Edition",
        "customer_package_type#1.value": "Standard Packaging",
        "item_package_dimensions#1.length.value": pkg_len,
        "item_package_dimensions#1.length.unit": "Inches",
        "item_package_dimensions#1.width.value": pkg_wid,
        "item_package_dimensions#1.width.unit": "Inches",
        "item_package_dimensions#1.height.value": pkg_hgt,
        "item_package_dimensions#1.height.unit": "Inches",
        "item_package_weight#1.value": pkg_weight,
        "item_package_weight#1.unit": "Pounds",
        "item_dimensions_fraction#1.length.decimal_value": eff_len,
        "item_dimensions_fraction#1.length.unit": "Inches",
        "item_dimensions_fraction#1.width.decimal_value": eff_wid,
        "item_dimensions_fraction#1.width.unit": "Inches",
        "item_dimensions_fraction#1.height.decimal_value": eff_hgt,
        "item_dimensions_fraction#1.height.unit": "Inches",
        "item_display_weight#1.value": item_weight,
        "item_display_weight#1.unit": "Pounds",
        "item_weight#1.value": item_weight,
        "item_weight#1.unit": "Pounds",
    })

    # Type-specific dimension field variants (different product types read
    # different field groups from the same effective dimensions).
    base_field_dict.update({
        "item_depth_width_height#1.depth.value": eff_len,
        "item_depth_width_height#1.depth.unit": "Inches",
        "item_depth_width_height#1.width.value": eff_wid,
        "item_depth_width_height#1.width.unit": "Inches",
        "item_depth_width_height#1.height.value": eff_hgt,
        "item_depth_width_height#1.height.unit": "Inches",

        "item_display_dimensions#1.depth.value": eff_hgt,
        "item_display_dimensions#1.depth.unit": "Inches",
        "item_display_dimensions#1.diameter.value": eff_wid,
        "item_display_dimensions#1.diameter.unit": "Inches",
        "item_display_dimensions#1.height.value": eff_hgt,
        "item_display_dimensions#1.height.unit": "Inches",
        "item_display_dimensions#1.length.value": eff_len,
        "item_display_dimensions#1.length.unit": "Inches",
        "item_display_dimensions#1.width.value": eff_wid,
        "item_display_dimensions#1.width.unit": "Inches",

        "master_pack_dimensions#1.height.value": pkg_hgt,
        "master_pack_dimensions#1.height.unit": "Inches",
        "master_pack_dimensions#1.length.value": pkg_len,
        "master_pack_dimensions#1.length.unit": "Inches",
        "master_pack_dimensions#1.width.value": pkg_wid,
        "master_pack_dimensions#1.width.unit": "Inches",
        "master_pack_weight#1.value": pkg_weight,
        "master_pack_weight#1.unit": "Pounds",

        "item_length_width#1.length.value": eff_len,
        "item_length_width#1.length.unit": "Inches",
        "item_length_width#1.width.value": eff_wid,
        "item_length_width#1.width.unit": "Inches",

        "item_length_width_thickness#1.length.value": eff_len,
        "item_length_width_thickness#1.length.unit": "Inches",
        "item_length_width_thickness#1.thickness.value": eff_hgt,
        "item_length_width_thickness#1.thickness.unit": "Inches",
        "item_length_width_thickness#1.width.value": eff_wid,
        "item_length_width_thickness#1.width.unit": "Inches",

        "item_length_width_height#1.height.value": eff_hgt,
        "item_length_width_height#1.height.unit": "Inches",
        "item_length_width_height#1.length.value": eff_len,
        "item_length_width_height#1.length.unit": "Inches",
        "item_length_width_height#1.width.value": eff_wid,
        "item_length_width_height#1.width.unit": "Inches",

        "item_dimensions#1.length.value": eff_len,
        "item_dimensions#1.length.unit": "Inches",
        "item_dimensions#1.width.value": eff_wid,
        "item_dimensions#1.width.unit": "Inches",
        "item_dimensions#1.height.value": eff_hgt,
        "item_dimensions#1.height.unit": "Inches",

        "item_width_height#1.height.value": eff_hgt,
        "item_width_height#1.height.unit": "Inches",
        "item_width_height#1.width.value": eff_wid,
        "item_width_height#1.width.unit": "Inches",

        "item_length_width_depth#1.depth.value": eff_hgt,
        "item_length_width_depth#1.depth.unit": "Inches",
        "item_length_width_depth#1.length.value": eff_len,
        "item_length_width_depth#1.length.unit": "Inches",
        "item_length_width_depth#1.width.value": eff_wid,
        "item_length_width_depth#1.width.unit": "Inches",

        "item_width_diameter_height#1.diameter.value": eff_len,
        "item_width_diameter_height#1.diameter.unit": "Inches",
        "item_width_diameter_height#1.height.value": eff_hgt,
        "item_width_diameter_height#1.height.unit": "Inches",
        "item_width_diameter_height#1.width.value": eff_wid,
        "item_width_diameter_height#1.width.unit": "Inches",

        "master_pack_layers_per_pallet_quantity#1.value": 1,
        "master_packs_per_layer_quantity#1.value": 1,
        "item_volume#1.value": 1,
        "item_volume#1.unit": "Microliters",
        "unit_count#1.value": 1,
        "unit_count#1.type.value": "count"

    })

    # Compliance defaults.
    base_field_dict.update({
        "condition_type#1.value": "New",
        "country_of_origin#1.value": "China",
        "batteries_required#1.value": "No",
        "batteries_included#1.value": "No",
        "supplier_declared_dg_hz_regulation#1.value": "Not Applicable",
        "is_expiration_dated_product#1.value": "No",
        "fulfillment_availability#1.fulfillment_channel_code": "DEFAULT",
    })
    return base_field_dict


# Generate AI field tags for products and persist them to amazon_us_tag
def thread_gen_amazon_field_json_and_update_db(basic_product_list, is_append=False, append_fields=None):
    """Build base/extra field JSON for each product and insert/update amazon_us_tag.

    :param basic_product_list: BasicProduct rows to tag.
    :param is_append: when True, re-tag existing rows instead of inserting new ones.
    :param append_fields: extra field names forced into the AI request.
    """
    if append_fields is None:
        append_fields = []
    create_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    need_add_list = []
    for tmp_product in basic_product_list:
        if get_real_product_type(tmp_product.product_type) == "":
            continue
        if "amazon" in tmp_product.not_available.lower():
            continue
        db_dict = json.loads(tmp_product.json_text)
        db_dict["id"] = tmp_product.id
        db_dict["platform"] = tmp_product.platform
        db_dict["product_type"] = get_real_product_type(tmp_product.product_type)
        db_dict["class_name"] = tmp_product.class_name
        db_dict["catalog_name"] = tmp_product.catalog_name
        db_dict["product_info"] = tmp_product.product_info
        db_dict["search_words"] = tmp_product.search_words
        # Price/quantity are resolved at upload time, not tagging time.
        if db_dict.get('price'):
            del db_dict['price']
        if db_dict.get('quantity'):
            del db_dict['quantity']
        # Step 1: build the statically derivable fields.
        res_dict = fill_base_field(db_dict)
        if len(res_dict) == 0:
            # BUG FIX: was `return`, which aborted the whole batch (and lost
            # the already-collected inserts) on the first incomplete product.
            continue
        rule_map = get_xlsm_data_for_product(res_dict["product_type"])
        if rule_map is None:
            # BUG FIX: same as above — skip only this product.
            continue
        # Full product info (collection-time split placeholders already replaced).
        prod_info = db_dict["product_info"]
        # Step 2: prepare the AI parameters and submit.
        required_field = list(set(rule_map["required"]) - set(get_common_data()["not_ai_field"]))
        # Battery fields have fixed answers and are never AI-guessed.
        # BUG FIX: was `"battery" not in x or "batteries" not in x`, which is
        # true for almost every field; "batteries" contains "battery", so a
        # single check suffices.
        required_field = [x for x in required_field if "battery" not in x]
        required_field.extend([x.strip() for x in append_fields])
        extra_field = required_field
        definition_map = rule_map["data_definition"]
        # BUG FIX: copy before merging so the (possibly cached/shared)
        # rule map's "drop" dict is never mutated across products.
        valid_values = dict(rule_map["drop"])
        valid_values.update(rule_map["valid_values"])
        if len(extra_field) > 0:
            extra_definition = {key: definition_map[key] for key in extra_field if key in definition_map}
            extra_valid_values = {key: valid_values[key] for key in extra_field if key in valid_values}
            gpt_guess_field_value_task(prod_info, extra_definition, extra_valid_values, res_dict)
        # Unit-field validation is currently disabled (known issues).
        # simplified_check_params(res_dict, rule_map)
        # Split fields into base (static) and extra (AI-guessed) JSON payloads.
        # BUG FIX: copy rule_map["required"] — extend() must not leak the
        # not_ai_field entries into the shared rule map.
        temp_field = list(rule_map["required"])
        temp_field.extend(get_common_data()["not_ai_field"])
        base_field = list(set(temp_field) - set(extra_field))
        base_field.append("item_type_keyword#1.value")
        base_json = {key: res_dict[key] for key in base_field if key in res_dict}
        extra_json = {key: res_dict[key] for key in extra_field if key in res_dict}
        base_json_str = json.dumps(base_json) if base_json else "{}"
        extra_json_str = json.dumps(extra_json) if extra_json else "{}"
        amazon_us_tag_obj = db_get_one("amazon_us_tag", f"basic_id={res_dict['id']}", AmazonUsTag)
        if amazon_us_tag_obj is None and not is_append:
            values = [db_dict.get("id"), db_dict.get("platform"), db_dict.get("sku"), create_time, db_dict.get("product_type"), db_dict.get("class_name"), base_json_str, extra_json_str]
            need_add_list.append(values)
        elif amazon_us_tag_obj and is_append:
            update_fields = ["id", "base_json", "extra_json", "remark"]
            db_batch_update("amazon_us_tag", update_fields, [[amazon_us_tag_obj.id, base_json_str, extra_json_str, f'AI has re labeled origin:{",".join(append_fields)}']])
    if need_add_list:
        fields = ["basic_id", "basic_platform", "basic_sku", "create_time", "product_type", "class_name", "base_json", "extra_json"]
        db_batch_insert("amazon_us_tag", fields, need_add_list)


# Batch-tag products
def batch_tag_amazon():
    """Fetch untagged products page by page and submit them to the tagging worker."""
    logger.info('start:AmazonUs.batch tag...')
    # Only products that have search words and no existing tag row.
    with ThreadPoolExecutor(max_workers=1) as executor:
        futures = []
        for page in range(1, 2):
            conditions = 'product_type !="" and search_words like "{%}" and id not in (select basic_id from amazon_us_tag) '
            basic_product_list = db_list_by_page("basic_product", ",".join(BasicProduct.fields), conditions, BasicProduct, page, 50)
            if not basic_product_list:
                break
            # Drop products explicitly marked unavailable for amazon.
            eligible = [prod for prod in basic_product_list if "amazon" not in prod.not_available.lower()]
            if eligible:
                futures.append(executor.submit(thread_gen_amazon_field_json_and_update_db, eligible))
        logger.info(f"amazonUs tag wait thread...")
        for future in as_completed(futures):
            try:
                future.result()
            except Exception as e:
                logger.info(f"amazonUs tag fail : {e}")
    logger.info('end:AmazonUs.tag...')


def gen_amazon_us_to_template_file(upload_id):
    """Collect pending amazon_us_record rows for one upload task, write them
    into the Amazon template xlsm files, and mark rows/upload as done.

    :param upload_id: primary key of the amazon_us_upload task row.
    """
    logger.info(f"start:Amazon-US gen table to file upload_id:{upload_id}")
    upload = db_get_one("amazon_us_upload", f"id={upload_id}", AmazonUsUpload)
    if upload is None:
        return
    opt_name = upload.opt_name
    shop = upload.shop
    brand = upload.brand
    conditions_ = f'status = 0 and opt_name="{opt_name}" and brand="{brand}" and shop="{shop}"'
    record_list = db_list_by_page("amazon_us_record", "id,product_type,template_field", conditions_, AmazonUsRecord, 1, 99999)
    if len(record_list) == 0:
        # Nothing to write: mark the upload task finished-empty (status 2).
        db_batch_update("amazon_us_upload", ["id", "status"], [[upload_id, 2]])
        return
    record_dict = {}
    need_update_record_db = []
    for record in record_list:
        # Group template rows by product type: one xlsm file per type.
        record_dict.setdefault(record.product_type, []).append(json.loads(record.template_field))
        need_update_record_db.append([record.id, 1, "success xlsm"])
    datestr = datetime.now().strftime("%Y%m%d_%H%M%S")
    write_amazon_xlsm(record_dict, datestr, shop, opt_name)
    db_batch_update("amazon_us_record", ["id", "status", "remark"], need_update_record_db)
    db_batch_update("amazon_us_upload", ["id", "status"], [[upload_id, 1]])
    # BUG FIX: the original passed upload_id as a printf-style logging arg
    # with no placeholder in the message, which raises a formatting error
    # inside the logging machinery.
    logger.info(f"end:Amazon-US gen table to file upload_id:{upload_id}")
    move_files_to_shared_folder()


# Assemble xlsm row data and insert it into the record table
def gen_amazon_to_xlsm(opt_name, brand, not_class_name_list, platform, brand_zip, shop, upload_id, upload_limit=1000, limit_img=2, price=30, process_core=12):
    """Select upload candidates for one brand/shop, enrich them with tag,
    image and price data in worker threads, then generate the xlsm file.

    :param opt_name: operator name embedded into generated SKUs.
    :param brand: target brand; products already recorded for it are skipped.
    :param not_class_name_list: class names excluded from selection.
    :param platform: restrict to one source platform (falsy = any).
    :param brand_zip: short brand code used inside the generated SKU.
    :param shop: target shop name.
    :param upload_id: amazon_us_upload row finalized at the end.
    :param upload_limit: maximum number of products to select.
    :param limit_img: minimum number of product images required.
    :param price: minimum base price filter. NOTE(review): this parameter is
        rebound inside the per-product loop below; it only acts as the
        filter threshold up to that point.
    :param process_core: number of worker threads the selection is split over.
    """
    logger.info("start:AmazonUs gen xlsm...")
    platform_str = "1=1"
    not_class_str = "1=1"
    if platform:
        platform_str = f'bp.platform = "{platform}" '
    if not_class_name_list:
        not_class_name_list = [f'"{x}"' for x in not_class_name_list]
        not_class_str = f'bp.class_name not in ({",".join(not_class_name_list)}) '
    # Select all products of the category from the basic table; amazon
    # availability was already checked when the AI tags were created.
    not_update_type_list = '"WALL_ART","wallart"'
    condition = f"SELECT bp.id FROM basic_product bp INNER JOIN amazon_us_tag aut ON bp.id = aut.basic_id LEFT JOIN amazon_us_record aur ON bp.id = aur.basic_id AND aur.brand = '{brand}' WHERE {platform_str} and {not_class_str} and bp.product_type not in ({not_update_type_list}) and bp.quantity > 3 and bp.not_available not like '%amazon%' and bp.status = 1 AND bp.published = 1 AND bp.price >= {price}  AND bp.image_count >= {limit_img}  AND aur.basic_id IS NULL ORDER BY bp.quantity DESC"
    need_upload_id_list = db_list_by_page_v2(condition, 1, upload_limit)
    if not need_upload_id_list:
        # Nothing left for this brand/filter combination: finish early.
        db_batch_update("amazon_us_upload", ["id", "status", "remark"], [[upload_id, 2, "组合上完了"]])
        send_dingtalk_message(f'Amazon-US上传表格提前结束\n品牌"{brand}"与条件:价格大于等于{price},至少图片:{limit_img}张已上完,请换个条件或品牌.')
        return
    need_upload_id_list = [x[0] for x in need_upload_id_list]
    # IDs stay sorted by quantity DESC: highest-stock products upload first.
    # Per-class price multiplier configuration.
    price_strategy_map = get_amazon_us_config_price_strategy()
    # Assemble in pages of thread_page_size items, one worker thread per page.
    threads = []
    logger.info(f'Amazon-US,{shop},gen count:{len(need_upload_id_list)}...')
    thread_page_size = len(need_upload_id_list) // process_core + 1
    for page in range(0, len(need_upload_id_list), thread_page_size):
        sleep_random_duration()
        loop_need_id_str = [str(x) for x in need_upload_id_list[page:page + thread_page_size]]
        basic_product_list_page = db_list_by_page("basic_product", "id,platform,sku,class_name,product_type,search_words,price,ship_fee,quantity,product_info,json_text", f'published = 1 and status = 1 and id in ({",".join(loop_need_id_str)})', BasicProduct, 1, thread_page_size * 2)
        tag_list_page = db_list_by_page("amazon_us_tag", "basic_id,base_json,extra_json", f'basic_id in ({",".join(loop_need_id_str)})', AmazonUsTag, 1, thread_page_size * 2)
        if tag_list_page is None:
            logger.info(f"find a bug query tag....{page}")
            continue
        # Images must be taken fresh from the basic table.
        basic_img_map = {}
        for basic_prod in basic_product_list_page:
            basic_id = basic_prod.id
            json_text = json.loads(basic_prod.json_text)
            img_list = json_text.get("images_list", [])
            img_list = [x for x in img_list if x and len(x) > 5]
            # Randomize the order of everything after the first two images.
            images_list = shuffle_list_except_first_two_elements(img_list)
            # Pad to exactly 9 slots (1 main + 8 "other" image locators).
            images_list = images_list + (9 - len(images_list)) * ['']
            img_map = {"main_product_image_locator#1.media_location": images_list[0], "other_product_image_locator_1#1.media_location": images_list[1],
                       "other_product_image_locator_2#1.media_location": images_list[2], "other_product_image_locator_3#1.media_location": images_list[3],
                       "other_product_image_locator_4#1.media_location": images_list[4], "other_product_image_locator_5#1.media_location": images_list[5],
                       "other_product_image_locator_6#1.media_location": images_list[6], "other_product_image_locator_7#1.media_location": images_list[7],
                       "other_product_image_locator_8#1.media_location": images_list[8]}
            basic_img_map[basic_id] = img_map
        loop_tag_list = []
        for tag in tag_list_page:
            # Overwrite the tag's image fields with the fresh basic images.
            base_json = json.loads(tag.base_json)
            base_json.update(basic_img_map.get(tag.basic_id, {}))
            loop_tag_list.append([tag.basic_id, json.dumps(base_json), tag.extra_json])
        tag_map = {x[0]: [x[1], x[2]] for x in loop_tag_list}
        # Work unit for one thread: basic_id -> merged field dict.
        loop_dict = {}
        for basic_product in basic_product_list_page:
            basic_id = basic_product.id
            if not tag_map.get(basic_id, None):
                continue
            tmp_dict = {"platform": basic_product.platform, "basic_sku": basic_product.sku, "product_type#1.value": basic_product.product_type,
                        "search_words": basic_product.search_words, "product_info": basic_product.product_info}
            tmp_dict.update(json.loads(tag_map.get(basic_id)[0]))
            if len(tag_map.get(basic_id)[1]) > 0:
                tmp_dict.update(json.loads(tag_map.get(basic_id)[1]))
            quantity = int(basic_product.quantity)
            # Selling price = base price + shipping fee (per-class multiplier
            # applied next). NOTE(review): this rebinds the `price` parameter.
            price = round(float(basic_product.price) + float(basic_product.ship_fee), 2)
            if not price:
                continue
            change_price = get_prod_price_by_class_name(price, basic_product.class_name, price_strategy_map)
            tmp_dict.update({"quantity": quantity, "price": change_price, "origin_price": price})
            loop_dict[basic_id] = tmp_dict
        # Start a worker thread for this page and remember it.
        thread_ = threading.Thread(target=fill_content_field, args=(loop_dict, opt_name, brand, brand_zip, shop))
        thread_.start()
        threads.append(thread_)
    # Wait for every worker to finish before generating the spreadsheet.
    for this_thread in threads:
        this_thread.join()
    # All threads done: generate the template file for this upload task.
    logger.info("end:Amazon-US gen table")
    gen_amazon_us_to_template_file(upload_id)


def fill_content_field(ai_dict_total, opt_name, brand, brand_zip, shop):
    """Worker: generate AI copywriting and final template rows for a batch of
    products, then bulk-insert into amazon_us_content and amazon_us_record.

    :param ai_dict_total: basic_id -> merged product/tag/price field dict.
    :param opt_name: operator name, embedded into the SKU and model number.
    :param brand: brand name prefixed onto titles and set on brand fields.
    :param brand_zip: short brand code used in the generated SKU.
    :param shop: target shop stored on each record row.
    """
    id_list = list(ai_dict_total.keys())
    datestr = datetime.now().strftime("%y%m%d")
    db_content_list = []
    db_record_list = []
    create_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    for index, _id in enumerate(id_list):
        _this_dict = ai_dict_total[_id]
        # Re-pick the reverse-lookup search keywords randomly on every run.
        search_words_map = json.loads(_this_dict.get('search_words', "{}"))
        if len(search_words_map) == 3:
            # NOTE(review): random.sample raises ValueError if any keyword
            # list is shorter than the requested count — presumably
            # guaranteed upstream; confirm.
            keywords_next = random.sample(search_words_map.get("Primary Keywords"), 3) + random.sample(search_words_map.get("Related Keywords"), 4) + random.sample(search_words_map.get("Long-tail Keywords"), 3)
            generic_keywords = split_langer_txt(keywords_next)
            _this_dict["generic_keyword#1.value"] = generic_keywords
        # Generate copy; brand words must be filtered from the product info first.
        sensitive_map = get_common_data()["sensitive"]
        if not sensitive_map:
            continue
        sensitive_words = [word for key, values in sensitive_map.items() if key not in ["need_filter_field"] for word in values]
        filter_product_info = remove_sensitive_words(_this_dict.get("product_info"), sensitive_words)
        gpt_title_bullet_desc(filter_product_info, _this_dict)
        # Skip products whose title/bullets/description generation failed.
        if any(key not in _this_dict or _this_dict[key] in (None, '', []) for key in ("item_name#1.value", "bullet_points", "product_description#1.value")):
            continue
        # Expand the bullet-point statements into numbered fields.
        len_bullet = _this_dict.get("bullet_points", [])
        if not len_bullet:
            continue
        for idx in range(len(len_bullet)):
            key = f"bullet_point#{str(idx + 1)}.value"
            value = _this_dict["bullet_points"][idx]
            _this_dict[key] = value
        # Filter only configured sensitive words plus AI-produced brand names.
        sensitive_words = sensitive_map["words"] + sensitive_map.get("ai", [])
        for field_tmp in sensitive_map["need_filter_field"]:
            _this_dict[field_tmp] = remove_sensitive_words(_this_dict.get(field_tmp), sensitive_words)
        _this_dict["item_name#1.value"] = brand + " " + _this_dict.get("item_name#1.value")
        # Fill the remaining listing fields.
        platform = _this_dict["platform"]
        basic_sku = _this_dict["basic_sku"]
        price = _this_dict["price"]
        origin_quantity = _this_dict["quantity"]
        # Advertise only 40% of real stock; below 4 units list as 0.
        _this_dict["fulfillment_availability#1.quantity"] = 0 if origin_quantity < 4 else int(origin_quantity * 0.4)
        _this_dict["purchasable_offer#1.our_price#1.schedule#1.value_with_tax"] = price
        _this_dict["list_price#1.value"] = max(price, round(_this_dict["origin_price"] * 1.8, 2))
        # Additional type-specific template metadata.
        rule_map = get_xlsm_data_for_product(_this_dict.get("product_type#1.value"))
        item_sku = f'{platform}-{basic_sku}-{brand_zip}-{datestr}-{opt_name}'
        if len(item_sku) >= 40:
            # Shorten the SKU when it exceeds the length limit.
            item_sku = f'{platform}-{basic_sku}-{brand_zip}{opt_name}'
        _this_dict["contribution_sku#1.value"] = item_sku
        _this_dict["brand#1.value"] = brand
        _this_dict["manufacturer#1.value"] = brand
        random_str = str(uuid.uuid4()).upper().replace("-", "")[:12] + str(random.randint(0, 1000))
        _this_dict["model_number#1.value"] = f'{random_str}{opt_name}'
        _this_dict["model_name#1.value"] = " ".join(_this_dict.get("product_title").split(" ")[0:6])
        _this_dict["part_number#1.value"] = random_str
        # Numeric-only filtering (for *_unit_of_measure / number_of / capacity
        # fields) is currently disabled:
        # for field, field_val in _this_dict.items():
        #     if field.endswith("value"):
        #         _this_dict[field] = extract_field_number(field, field_val)
        # Data destined for the template sheet, in template column order.
        template_list = rule_map["template"]
        res_list = [_this_dict.get(key, "") for key in template_list]
        # The second price column (merchant price) must stay empty.
        indices = [i for i, x in enumerate(template_list) if x == "purchasable_offer#1.our_price#1.schedule#1.value_with_tax"]
        if len(indices) >= 2:
            last_index = indices[-1]
            res_list[last_index] = ""
        if any(isinstance(item, (dict, list)) for item in res_list):
            # A nested value means a tag field was not flattened; skip for now.
            logger.info(f"sku:{item_sku},find a not type,tmp skip")
        else:
            feed_product_type = _this_dict["product_type#1.value"]
            this_map = get_common_data().get("class_type_map")
            class_name = this_map.get(feed_product_type)
            content = {"item_name": _this_dict["item_name#1.value"], "bullet_points": _this_dict["bullet_points"], "product_description": _this_dict["product_description#1.value"]}
            db_content_list.append([_id, basic_sku, platform, brand, json.dumps(content), create_time, class_name, feed_product_type])
            record = [0, create_time, item_sku, _id, platform, basic_sku, price, origin_quantity, opt_name, shop, brand,
                      brand_zip, json.dumps(res_list), class_name, feed_product_type, "wait upload"]
            db_record_list.append(record)
    if len(db_content_list) > 0:
        db_batch_insert("amazon_us_content", ["basic_id", "basic_sku", "basic_platform", "brand", "content", "create_time", "class_name", "product_type"], db_content_list)
    if len(db_record_list) > 0:
        db_batch_insert("amazon_us_record", ["status", "create_time", "sku", "basic_id", "basic_platform", "basic_sku", "price", "quantity", "opt_name", "shop", "brand", "brand_zip", "template_field", "class_name", "product_type", "remark"], db_record_list)


def write_amazon_xlsm(_record_dict, datestr, shop, opt_name):
    """Write one template xlsm file per product type into the shop's folder.

    :param _record_dict: product type -> list of template row data.
    :param datestr: date string used to name the destination folder.
    :param shop: shop name used as the folder root.
    :param opt_name: operator name embedded into each file name.
    """
    dest_folder = create_folder(shop, datestr + "-product")
    # dict keys are already unique; the original list(set(...)) round-trip
    # only randomized the processing order.
    for type_, template_rows in _record_dict.items():
        file_path = get_template_file_path(type_)
        new_file_path = copy_and_rename_file_with_row_check(file_path, dest_folder, f'{type_}-{opt_name}-{str(uuid.uuid4()).upper().replace("-", "")[:10]}.xlsm')
        write_to_amazon_template_xlsm(new_file_path, template_rows)


if __name__ == '__main__':
    # Manual entry point for ad-hoc runs; uncomment the job you need.
    # batch_tag_amazon()
    # NOTE(review): hard-coded upload_id=4 — presumably a debugging run;
    # confirm before relying on this entry point.
    gen_amazon_us_to_template_file(4)
    #gen_amazon_to_xlsm("HN","Heemab",[],None,"HM","ZLDTECH",4)
    pass
