import glob
import json
import os
import re
from datetime import datetime, timedelta

from product_upload.domain.amazon_us.amazon_us_record import AmazonUsRecord
from product_upload.domain.basic.basic_image import BasicImage
from product_upload.domain.basic.basic_product import BasicProduct
from product_upload.domain.basic.basic_supplier import BasicSupplier
from product_upload.util.basic.common_util import load_json
from product_upload.util.basic.file_util import create_folder, write_2d_list_to_txt
from product_upload.util.basic.mysql_util import db_list_by_page, db_batch_update, db_get_one, db_batch_insert, db_delete
from product_upload.util.seller.amazon_us_util import old_get_real_product_type, old_get_common_data

# extra_json keys whose values must be coerced to a plain number via
# extract_number() before being written back (see
# modify_amazon_us_field_numbers).
# NOTE(review): "stone_shape" and "item_length_description" look
# non-numeric by name — confirm they really belong in this list.
numbers_field = [
    "length_longer_edge",
    "width_shorter_edge",
    "seat_height",
    "thickness_floor_to_top",
    "thickness_width_side_to_side",
    "thickness_head_to_toe",
    "maximum_weight_recommendation",
    "capacity",
    "item_length_numeric",
    "item_dimension_width",
    "item_dimension_depth",
    "product_thickness_dimension",
    "product_height_dimension",
    "product_width_dimension",
    "item_height_dimension",
    "item_thickness_dimension",
    "maximum_height",
    "maximum_weight_capacity",
    "number_of_speeds",
    "number_of_wheels",
    "maximum_tilt_angle",
    "maximum_stride_length",
    "cutting_width",
    "website_shipping_weight",
    "item_diameter_derived",
    "item_length_description",
    "number_of_channels",
    "memory_storage_capacity",
    "output_wattage",
    "maximum_compatible_size",
    "minimum_compatible_size",
    "number_of_panels",
    "maximum_reading_interest_age",
    "minimum_reading_interest_age",
    "pages",
    "band_size_num",
    "number_of_height_positions",
    "number_of_sets",
    "frequency",
    "stone_shape",
]

# Maps vendor/category-specific dimension field names to the canonical
# base_json dimension key (item_length / item_width / item_height) whose
# value they should mirror (see modify_amazon_us_field_numbers).
num_field_map = {
    "length_longer_edge": "item_length",
    "item_length_numeric": "item_length",
    "item_length_description": "item_length",
    "width_shorter_edge": "item_width",
    "item_dimension_width": "item_width",
    "product_width_dimension": "item_width",
    "item_thickness_dimension": "item_height",
    "seat_height": "item_height",
    "thickness_floor_to_top": "item_height",
    "product_height_dimension": "item_height",
    "item_height_dimension": "item_height",
    "maximum_height": "item_height",
    "depth_front_to_back": "item_length",
    "depth_width_side_to_side": "item_width",
    "depth_height_floor_to_top": "item_height",
    "height_width_side_to_side": "item_width",
    "height_floor_top": "item_height",
    "length_head_to_toe": "item_length",
    "length_width_side_to_side": "item_width",
    "length_height_floor_to_top": "item_height",
    "thickness_head_to_toe": "item_height",
}


def extract_number(text):
    """Extract the first numeric value found in *text*.

    Tries a digit match first (int or float, rounded to 2 decimals);
    falls back to spelled-out English numbers zero..ten (substring
    match, so e.g. "stone" matches "one"); returns 1 when nothing
    matches at all.
    """
    numeric = re.search(r'(\d+(\.\d+)?)', text)
    if numeric is not None:
        return round(float(numeric.group(1)), 2)

    # No digits present — look for an English number word instead.
    spelled_out = {
        'zero': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4,
        'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9,
        'ten': 10
    }
    lowered = text.lower()
    for word, value in spelled_out.items():
        if word in lowered:
            return value

    # Nothing recognisable: default quantity of 1.
    return 1


# Refresh tagged numeric/dimension values stored in amazon_us_tag.
def modify_amazon_us_field_numbers():
    """Normalise numeric fields inside base_json/extra_json of amazon_us_tag rows.

    For every row: force a fixed material-regulation value, coerce known
    numeric fields with extract_number(), mirror dimension fields from
    base_json into extra_json, then batch-write the rows back.
    """
    amazon_list = db_list_by_page("amazon_us_tag", "id, base_json, extra_json", "1=1", None, 1, 100000)
    need_update = []
    for amazon_item in amazon_list:
        # BUG FIX: was the *string* "{}", which json.dumps() double-encoded
        # into the JSON string '"{}"' instead of an empty JSON object.
        extra = {}
        base = json.loads(amazon_item[1])
        base.update({"supplier_declared_material_regulation1": "Not Applicable"})
        if amazon_item[2] is not None and amazon_item[2] != "":
            extra = json.loads(amazon_item[2])
            for item in list(extra.keys()):
                if "included_components1" == item:
                    extra[item] = "others"
                if "special_features1" == item:
                    extra[item] = "others"
                if item in numbers_field:
                    extra[item] = extract_number(str(extra[item]))
                if item in num_field_map:
                    # Mirror the canonical dimension value from base_json.
                    extra[item] = base[num_field_map.get(item)]
                    if item == "item_length_description":
                        extra[item] = str(base[num_field_map.get(item)]) + "IN"
        for item in list(base.keys()):
            if item in num_field_map:
                base[item] = base[num_field_map.get(item)]
        need_update.append([amazon_item[0], json.dumps(base), json.dumps(extra)])
    db_batch_update("amazon_us_tag", ["id", "base_json", "extra_json"], need_update)
    print(len(need_update))


# Refresh the item_type field of already-tagged rows.
def modify_amazon_us_field_item_type():
    """Re-derive item_type for every amazon_us_tag row from its product's rule map.

    Looks up the product's category rules via old_get_real_product_type;
    when the stored item_type is not among the allowed values, the first
    allowed value is substituted and base_json is rewritten in batch.
    """
    amazon_list = db_list_by_page("amazon_us_tag", "id,basic_id,basic_platform,basic_sku,base_json,extra_json", '1=1', None, 1, 100000)
    update_data = [list(x) for x in amazon_list]
    need_update_db = []
    for row in update_data:
        basic_data = db_get_one("basic_product", f"id={row[1]}", None)
        # NOTE(review): the result is used here as a rule dict, but
        # modify_basic_class_name_product_type treats the same helper's
        # return as a string — confirm the helper's contract.
        rule_map = old_get_real_product_type(basic_data[7])
        item_type = basic_data[9]
        if rule_map is not None:
            valid_values = rule_map["drop"]
            valid_values.update(rule_map["valid_values"])
            item_type_list = valid_values.get("item_type", [])
            # Robustness fix: previously item_type_list[0] raised IndexError
            # when the rule map declared no item_type values.
            if item_type_list and item_type not in item_type_list:
                item_type = item_type_list[0]
            base_json = json.loads(row[4])
            base_json.update({"item_type": item_type})
            need_update_db.append([row[0], json.dumps(base_json)])
        print(row[0])  # progress indicator
    db_batch_update("amazon_us_tag", ["id", "base_json"], need_update_db)


# Refresh the major/minor category columns of an existing table.
def modify_table_class_type(table):
    """Sync class_name/product_type on *table* from basic_product by basic_id."""
    id_rows = db_list_by_page(table, "basic_id", '1=1', None, 1, 1000000)
    id_strings = [str(row[0]) for row in id_rows]
    basic_rows = db_list_by_page("basic_product", "id,class_name,product_type", f'id in ({",".join(id_strings)})', None, 1, 1000000)
    lookup = {str(row[0]): row[1:] for row in basic_rows}
    updates = []
    for key in id_strings:
        found = lookup.get(key, [])
        if found:
            updates.append([int(key), found[0], found[1]])
    db_batch_update(table, ["basic_id", "class_name", "product_type"], updates)


# Refresh the major/minor category columns of tagged content rows.
def modify_amazon_us_content_class_type():
    """Backfill class_name/product_type on amazon_us_content rows whose class_name is empty."""
    content_rows = db_list_by_page("amazon_us_content", "basic_id", 'class_name=""', None, 1, 10000000)
    id_strings = [str(row[0]) for row in content_rows]
    basic_rows = db_list_by_page("basic_product", "id,class_name,product_type", f'id in ({",".join(id_strings)})', None, 1, 10000000)
    lookup = {str(row[0]): row[1:] for row in basic_rows}
    updates = []
    for key in id_strings:
        found = lookup.get(key, [])
        if found:
            updates.append([int(key), found[0], found[1]])
    db_batch_update("amazon_us_content", ["basic_id", "class_name", "product_type"], updates)


# Refresh class_name in the record table.
def update_record_product_type_class():
    """Backfill product_type/class_name on amazon_us_record rows whose
    class_name is 'others' or empty, copying from basic_product (GG platform)."""
    records = db_list_by_page("amazon_us_record", "basic_id", "class_name='others' or class_name =''", None, 1, 300000)
    id_csv = ",".join(str(row[0]) for row in records)
    basic_rows = db_list_by_page("basic_product", "id, product_type,class_name", f'platform ="GG" and id in ({id_csv})', None, 1, 300000)
    db_batch_update("amazon_us_record", ["basic_id", "product_type", "class_name"], [list(row) for row in basic_rows])


# System prompt for an LLM brand-detection task: the model must return a
# JSON list of brand names found in product text (or [] when none).
# Runtime string sent to the model — do not reformat or translate.
sys_brand_prompt = """
    Task Description:
   You are to analyze the following text to determine whether it contains any brand names. Use the provided recognition standards to guide your reasoning. You are not allowed to rely on a predefined list of brands. Instead, use your general knowledge, linguistic cues, context, and trademark indicators to spot potential brand names. These may include well-known brands or smaller, lesser-known brands.
Recognition Standards:
1.Linguistic and Orthographic Features:
◦Unconventional spelling, unique letter combinations, mixed-case formats (e.g., “CamelCase”), or inclusion of special characters and numbers (e.g., “X-Tech360”).
◦Terms that lack a common dictionary meaning or do not resemble normal place names, personal names, or regular nouns.
◦Words that are conspicuously capitalized (especially in the middle of sentences) or presented in all caps (e.g., “SONY”), and may be accompanied by ™ or ® symbols.
2.Contextual Cues:
◦Words following phrases like “by [X],” “manufactured by [X],” “from [X brand],” or “officially licensed by [X].”
◦Terms promoted in a marketing or commercial context (e.g., “our exclusive [X],” “the [X] line”) that are not generic descriptive words.
◦Unique terms associated with “official distributor,” “original product,” “authentic guarantee,” or similar phrases that imply a proprietary source.
3.Domain-Specific Characteristics:
◦Distinctive terms linked to product categories like electronics, fashion, beauty, home appliances, sports gear, or automotive parts.
◦Foreign-language or hybrid words that do not translate easily into common English.
4.Trademark and Registration Indicators:
◦Presence of ™ or ®, or text like “registered trademark” or “brand registered.”
◦Any term followed by product models or series names (e.g., “XYZ X200 Series”) suggests “XYZ” is a brand.
5.Layered Analysis:
◦Multiple features combined (unusual spelling, capitalization, marketing context, mention as a product source).
◦Repeated occurrences of the same distinctive term in association with product features or proprietary claims.

Self-Check Requirement:
Before finalizing your answer, carefully review the identified terms to ensure they meet the criteria above. Confirm that you are not missing any potential brand names and that no generic terms are incorrectly flagged as brands. If uncertain, reconsider the reasoning steps to ensure accuracy.

Output Requirements:
Strictly return in JSON format.
If any brand name is identified, please list it without any explanation.For example:["kingbank", "apple",...]
If there is no brand, return an empty list.For example:[]
"""


# Map product_type and class_name in the basic table.
def modify_basic_class_name_product_type(table):
    """Normalise product_type via old_get_real_product_type and derive
    class_name from the shared class_type_map, page by page."""
    # NOTE(review): here the helper's return is treated as a string, but
    # modify_amazon_us_field_item_type treats it as a rule dict — confirm.
    class_type_map = old_get_common_data().get("class_type_map")
    for page_no in range(1, 300):
        page = db_list_by_page(table, "id,product_type", 'product_type !=""', None, page_no, 50000)
        if not page:
            break
        updates = []
        for row_id, raw_type in {x[0]: x[1] for x in page}.items():
            fixed_type = old_get_real_product_type(raw_type).strip()
            class_name = class_type_map.get(fixed_type, "").strip()
            updates.append([row_id, fixed_type, class_name])
        db_batch_update(table, ["id", "product_type", "class_name"], updates)
        print(page_no)


# Find SKUs that should be removed because they are no longer sourced.
def tmp_select_not_orign_goods(json_path, shop):
    """Compare a platform SKU export against DB records and dump the misses.

    Writes two JSON files: SKUs absent from the listing-record table, and
    SKUs whose basic SKU cannot be found as a substring in basic_product.
    """
    sku_list = load_json(json_path)
    record_list = db_list_by_page("amazon_us_record", "sku", f"shop ='{shop}'", AmazonUsRecord, 1, 999999999)
    product_list = db_list_by_page("basic_product", "sku", f"platform!='DB'", BasicProduct, 1, 999999999)
    db_basic_sku_list = [x.sku for x in product_list]
    db_sku_record_list = [x.sku for x in record_list]
    print("amazon:", len(sku_list))
    print("record:", len(db_sku_record_list))
    print("basic:", len(db_basic_sku_list))
    not_in_record = list(set(sku_list) - set(db_sku_record_list))
    matched = []
    for idx, candidate in enumerate(sku_list):
        print(idx)  # progress indicator; the substring scan below is slow
        if any(basic_sku in candidate for basic_sku in db_basic_sku_list):
            matched.append(candidate)
    not_in_basic = list(set(sku_list) - set(matched))
    try:
        with open(f"C:\\Users\\hunan\\Desktop\\not_in_data\\不在上架记录_{shop}.json", 'w', encoding='utf-8') as json_file:
            json.dump(not_in_record, json_file, indent=4, ensure_ascii=False)
        with open(f"C:\\Users\\hunan\\Desktop\\not_in_data\\不在总库_{shop}.json", 'w', encoding='utf-8') as json_file:
            json.dump(not_in_basic, json_file, indent=4, ensure_ascii=False)
    except Exception as e:
        print(e)


# Find supplier names with a leading or trailing space.
def select_space_field():
    """Trim surrounding whitespace from basic_supplier names that start
    or end with a space character."""
    rows = db_list_by_page("basic_supplier", "id,name", "1=1", BasicSupplier, 1, 9999999)
    padded = [row for row in rows if row.name.startswith(' ') or row.name.endswith(' ')]
    print(len(padded))
    db_batch_update("basic_supplier", ["id", "name"], [[row.id, row.name.strip()] for row in padded])


def tmp_list_sku_img():
    """Dump image-URL rows for a pasted SKU list into a local JSON file.

    Each output row is a freshly-dated SKU followed by the main image and
    the 15 other image URLs from basic_image.
    """
    sku_list = "skus".split("\n")  # NOTE(review): placeholder — paste the real SKU list here
    sku_list = [x.replace("GG-", "").split("-TH-")[0] for x in sku_list if x]
    # Build the IN-clause before the f-string: nested double quotes inside
    # a double-quoted f-string are a syntax error on Python < 3.12.
    sku_in = ",".join(f'"{x}"' for x in sku_list)
    res = db_list_by_page("basic_image", ",".join(BasicImage.fields), f"sku in ({sku_in})", BasicImage, 1, 20000)
    rows = []  # renamed from `all`, which shadowed the builtin
    create_time = datetime.now().strftime("%y-%m-%d")
    for basic_image in res:
        img_list = [f'GG-{basic_image.sku}-TH-{create_time}-XYL', basic_image.main_image, basic_image.other_image_url1, basic_image.other_image_url2,
                    basic_image.other_image_url3, basic_image.other_image_url4, basic_image.other_image_url5,
                    basic_image.other_image_url6, basic_image.other_image_url7, basic_image.other_image_url8,
                    basic_image.other_image_url9, basic_image.other_image_url10, basic_image.other_image_url11,
                    basic_image.other_image_url12, basic_image.other_image_url13, basic_image.other_image_url14,
                    basic_image.other_image_url15]
        rows.append(img_list)
    rows = [x for x in rows if x]  # kept from the original (rows are always truthy)
    with open(r"C:\Users\hunan\Desktop\all.json", 'w', encoding='utf-8') as json_file:
        json.dump(rows, json_file, indent=4, ensure_ascii=False)


def get_all_supplier_code():
    """Collect distinct supplier codes from basic_product (GG platform)
    and batch-insert them into basic_supplier.

    First occurrence of each code wins; insertion order is preserved.
    """
    db_supp = db_list_by_page("basic_product", "supplier_code,supplier_name", "platform = 'GG'", BasicProduct, 1, 99999999)
    all_supplier = []
    # Set membership is O(1); the previous list scan made the loop O(n^2).
    seen_codes = set()
    for item in db_supp:
        if item.supplier_code not in seen_codes:
            seen_codes.add(item.supplier_code)
            create_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            all_supplier.append([1, "GG", create_time, item.supplier_code, item.supplier_name])
    db_batch_insert("basic_supplier", ["status", "platform", "create_time", "code", "name"], all_supplier)


def extract_skus_from_file(filepath):
    """Parse a tab-separated report into ``{sku: price}``.

    Assumes the first line is a header, the SKU is column 0 and the price
    is column 2. Lines with fewer than three columns (including blank
    lines) are skipped instead of raising IndexError, and the trailing
    newline is no longer kept in the last column's value.
    """
    skus_price = {}
    with open(filepath, 'r', encoding='utf-8') as file:
        next(file, None)  # skip header row; tolerate an empty file
        for line in file:
            fields = line.rstrip('\n').split('\t')
            if len(fields) < 3:
                continue  # robustness: previously raised IndexError
            skus_price[fields[0]] = fields[2]
    return skus_price


def get_all_skus_in_folder(folder_path):
    """Collect the SKUs from every .txt report file inside *folder_path*.

    Iterating the dict returned by extract_skus_from_file yields its keys,
    i.e. the SKUs; prices are intentionally discarded here.
    """
    txt_files = glob.glob(os.path.join(folder_path, "*.txt"))
    return [sku for path in txt_files for sku in extract_skus_from_file(path)]


def get_amazon_us_not_can_price_sku(shop):
    """Find SKUs whose report price diverges from the DB price by > 0.5
    and dump [sku, db_price, list_price] triples to a JSON file.

    list_price is the DB price, but never below (db_price / 1.6) * 1.8
    rounded to two decimals.
    """
    sku_price_map = extract_skus_from_file(f"C:\\Users\\hunan\\Desktop\\amazon_report\\{shop}.txt")
    db_sku_price_map = {}
    need_update = []
    for page in range(1, 999):
        res_record_list = db_list_by_page("amazon_us_record", "sku,price", f"shop='{shop}'", AmazonUsRecord, page, 20000)
        if not res_record_list:
            break
        db_sku_price_map.update({x.sku: x.price for x in res_record_list})
    for sku, price in sku_price_map.items():
        if sku not in db_sku_price_map:
            continue
        db_price = round(float(db_sku_price_map[sku]), 2)
        if abs(round(float(price), 2) - db_price) > 0.5:
            # BUG FIX: the trailing ", 2" used to be a *third argument to
            # max()* instead of round()'s ndigits, so the computed floor
            # price was rounded to an integer and a stray 2 competed
            # inside max().
            list_price = max(db_price, round((db_price / 1.6) * 1.8, 2))
            need_update.append([sku, db_price, list_price])
    with open(f"C:\\Users\\hunan\\Desktop\\amazon_report\\{shop}.json", 'w', encoding='utf-8') as json_file:
        json.dump(need_update, json_file, indent=4, ensure_ascii=False)


def gen_txt_file_to_update_list_price(shop):
    """Write Amazon price-update flat-files (9999 data rows each) for *shop*.

    Selects amazon_us_record rows created after 2025-02-26 17:45 with
    price > 1; each output row carries the SKU, price - 1 in the 'price'
    column and price * 1.125 in the trailing 'List Price' column.
    """
    header = ['sku', 'product-id', 'product-id-type', 'price', 'minimum-seller-allowed-price', 'maximum-seller-allowed-price', 'item-condition', 'quantity', 'add-delete', 'will-ship-internationally', 'expedited-shipping', 'standard-plus', 'item-note', 'fulfillment-center-id', 'product-tax-code',
              'handling-time', 'merchant_shipping_group_name', 'batteries_required', 'are_batteries_included', 'battery_cell_composition', 'battery_type', 'number_of_batteries', 'battery_weight', 'battery_weight_unit_of_measure', 'number_of_lithium_ion_cells', 'number_of_lithium_metal_cells',
              'lithium_battery_packaging', 'lithium_battery_energy_content', 'lithium_battery_energy_content_unit_of_measure', 'lithium_battery_weight', 'lithium_battery_weight_unit_of_measure', 'supplier_declared_dg_hz_regulation1', 'supplier_declared_dg_hz_regulation2',
              'supplier_declared_dg_hz_regulation3', 'supplier_declared_dg_hz_regulation4', 'supplier_declared_dg_hz_regulation5', 'hazmat_united_nations_regulatory_id', 'safety_data_sheet_url', 'item_weight', 'item_weight_unit_of_measure', 'item_volume', 'item_volume_unit_of_measure',
              'flash_point', 'ghs_classification_class1', 'ghs_classification_class2', 'ghs_classification_class3', 'california_proposition_65_compliance_type', 'california_proposition_65_chemical_names1', 'california_proposition_65_chemical_names2', 'california_proposition_65_chemical_names3',
              'california_proposition_65_chemical_names4', 'california_proposition_65_chemical_names5', 'List Price']
    datestr = datetime.now().strftime("%y%m%d")
    dest_folder = create_folder(shop, datestr + "update_list_price")

    all_records = db_list_by_page("amazon_us_record", "sku,price", f"create_time >= '2025-2-26 17:45:00' and shop = '{shop}' and price > 1", AmazonUsRecord, 1, 999999)
    chunk_size = 9999
    for start in range(0, len(all_records), chunk_size):
        rows = [header]
        for sku_item in all_records[start:start + chunk_size]:
            line = [''] * len(header)
            line[0] = sku_item.sku                        # 'sku'
            line[3] = round(sku_item.price - 1, 2)        # 'price'
            line[-1] = round(sku_item.price * 1.125, 1)   # 'List Price'
            rows.append(line)
        write_2d_list_to_txt(f'{dest_folder}{os.path.sep}{datestr}-{shop}_{start}_list_price.txt', rows)


def get_point_date_sku_to_del_file(shop):
    """Build Amazon deletion flat-files for *shop* SKUs created after 2025-02-26.

    Reads the exported listing report, keeps SKUs whose date segment
    (second-to-last '-' part, 6 digits) is later than 250226 and whose
    operator suffix belongs to the shop, skips two hard-coded keep lists,
    and writes 'x' (delete) rows in chunks of 9999.
    """
    filepath = f"C:\\Users\\hunan\\Desktop\\need_delete\\{shop}.txt"
    shop_map = {"Forio": {"brand": ["TREATLIFE HOME"], "brand_zip": ["TH"], "opt_name": ["XWY"]}, "KK_B11_Jason": {"brand": ["LOPOO"], "brand_zip": ["LP"], "opt_name": ["ZXY"]}, "ROVSHENYUSGJO_Inc": {"brand": ["Dyncan"], "brand_zip": ["Dy"], "opt_name": ["ZCY"]},
                "SVRCK": {"brand": ["SVRCK"], "brand_zip": ["SV"], "opt_name": ["PT"]}, "Xboun": {"brand": ["Udorich"], "brand_zip": ["Ud"], "opt_name": ["FAQ"]}, "Pengxiaohu_LJLB": {"brand": ["LJLB"], "brand_zip": ["LB"], "opt_name": ["PT", "ZXY"]},
                "ZLDTECH": {"brand": ["Heemab"], "brand_zip": ["HM"], "opt_name": ["OSQ", "CHB"]}, "UdorichUS": {"brand": ["Udorich"], "brand_zip": ["Ud"], "opt_name": ["FAQ"]}, "lixiaoli": {"brand": ["AGANS"], "brand_zip": ["AG"], "opt_name": ["PT", "ZXY"]},
                "Wangkui": {"brand": ["neolun"], "brand_zip": ["NL"], "opt_name": ["ZCY"]}, "HELIN": {"brand": ["YLIEHS"], "brand_zip": ["YH"], "opt_name": ["PT"]}, "ZHOUZHENXI": {"brand": ["Benicabaly"], "brand_zip": ["BC"], "opt_name": ["FAQ"]}}
    this_shop_opt_list = shop_map.get(shop).get("opt_name")
    skus_ = []
    with open(filepath, 'r', encoding='utf-8') as file:
        # Skip the header row.
        next(file)
        for line in file:
            # The SKU is assumed to be the first tab-separated column.
            sku = line.split('\t')[0]
            sku = sku.strip()
            sku_spl = sku.split("-")
            if len(sku_spl) >= 5 and len(sku_spl[-2]) == 6 and sku_spl[-1] in this_shop_opt_list:
                try:
                    date_int = int(sku_spl[-2])
                    if date_int > 250226:
                        skus_.append(sku.strip())
                except ValueError:
                    pass
    # BUG FIX: "standard-plus" and "item-note" used to be fused into one
    # header column by implicit string concatenation (missing comma).
    header = ["sku", "product-id", "product-id-type", "price", "minimum-seller-allowed-price", "maximum-seller-allowed-price", "item-condition", "quantity",
              "add-delete", "will-ship-internationally", "expedited-shipping", "standard-plus", "item-note", "fulfillment-center-id", "product-tax-code", "handling-time",
              "business-price", "quantity-price-type", "quantity-lower-bound1", "quantity-price1", "quantity-lower-bound2", "quantity-price2", "quantity-lower-bound3", "quantity-price3", "quantity-lower-bound4", "quantity-price4", "quantity-lower-bound5", "quantity-price5",
              "progressive_discount_type", "progressive_discount_lower_bound1", "progressive_discount_value1",
              "progressive_discount_lower_bound2", "progressive_discount_value2", "progressive_discount_lower_bound3", "progressive_discount_value3", "pricing_action", "merchant_shipping_group_name", "batteries_required", "are_batteries_included", "battery_cell_composition",
              "battery_type", "number_of_batteries", "battery_weight", "battery_weight_unit_of_measure", "number_of_lithium_ion_cells", "number_of_lithium_metal_cells", "lithium_battery_packaging",
              "lithium_battery_energy_content", "lithium_battery_energy_content_unit_of_measure", "lithium_battery_weight", "lithium_battery_weight_unit_of_measure", "supplier_declared_dg_hz_regulation1", "supplier_declared_dg_hz_regulation2", "supplier_declared_dg_hz_regulation3",
              "supplier_declared_dg_hz_regulation4", "supplier_declared_dg_hz_regulation5", "hazmat_united_nations_regulatory_id", "safety_data_sheet_url", "item_weight", "item_weight_unit_of_measure",
              "item_volume", "item_volume_unit_of_measure", "flash_point", "ghs_classification_class1", "ghs_classification_class2", "ghs_classification_class3", "california_proposition_65_compliance_type", "california_proposition_65_chemical_names1",
              "california_proposition_65_chemical_names2",
              "california_proposition_65_chemical_names3", "california_proposition_65_chemical_names4", "california_proposition_65_chemical_names5"]
    write_sku_list = []
    for sku in skus_:
        # Hard-coded keep lists: these SKUs must NOT be deleted.
        if sku in ["GG-SG001420AAD-TH-250301-XWY", "SY-84026552-TH-250304-XWY", "GG-W465P185059-TH-250301-XWY", "GG-B180P172082-TH-250301-XWY"]:
            continue
        if sku in ['GG-B02257232-BC-250301-FAQ', 'GG-B078112161-BC-250301-FAQ', 'GG-W46577100-BC-250302-FAQ', 'GG-N735P177478B-HM-250303-CHB', 'DB-D0102H7JWC8-BC-250301-FAQ', 'DB-D0102H71W0X-NL-250226-ZCY', 'GG-W1510S00018-BC-250302-FAQ', 'DB-D0102HG3CWA-BC-250301-FAQ',
                   'DB-D0102HGZ52V-BC-250301-FAQ', 'DB-D0102HRMWA2-NL-250225-ZCY', 'GG-B02257232-BC-250301-FAQ', 'DB-D0102H5Q3JX-YH-250304-PT', 'DB-D0102HGNVS7-TH-250225-XWY', 'DB-D0102XT8LUG-NL-250301-ZCY', 'XL-91758-NL-250302-ZCY', 'SY-30443158-YH-250302-PT', 'DB-D0102H7FKET-TH-250227-XWY',
                   'DB-D0102HP3J4U-TH-250227-XWY']:
            continue
        val_list = len(header) * [""]
        val_list[0] = sku
        val_list[8] = 'x'  # 'add-delete' column: 'x' = delete the listing
        write_sku_list.append(val_list)

    print(len(write_sku_list))
    for i in range(0, len(write_sku_list), 9999):
        this_write = write_sku_list[i:i + 9999]
        this_write.insert(0, header)
        write_2d_list_to_txt(f"C:\\Users\\hunan\\Desktop\\need_delete\\del_{shop}_{i}.txt", this_write)

def gen_delete_amazon_us_txt(filepath, shop):
    """Generate Amazon deletion flat-files for every SKU listed in the JSON at *filepath*.

    Each output row marks the SKU with 'x' in the 'add-delete' column;
    files are written in chunks of 19998 data rows into a folder for *shop*.
    """
    skus = load_json(filepath)
    print(len(skus))
    # BUG FIX: "standard-plus" and "item-note" used to be fused into one
    # header column by implicit string concatenation (missing comma).
    header = ["sku", "product-id", "product-id-type", "price", "minimum-seller-allowed-price", "maximum-seller-allowed-price", "item-condition", "quantity",
              "add-delete", "will-ship-internationally", "expedited-shipping", "standard-plus", "item-note", "fulfillment-center-id", "product-tax-code", "handling-time",
              "business-price", "quantity-price-type", "quantity-lower-bound1", "quantity-price1", "quantity-lower-bound2", "quantity-price2", "quantity-lower-bound3", "quantity-price3", "quantity-lower-bound4", "quantity-price4", "quantity-lower-bound5", "quantity-price5",
              "progressive_discount_type", "progressive_discount_lower_bound1", "progressive_discount_value1",
              "progressive_discount_lower_bound2", "progressive_discount_value2", "progressive_discount_lower_bound3", "progressive_discount_value3", "pricing_action", "merchant_shipping_group_name", "batteries_required", "are_batteries_included", "battery_cell_composition",
              "battery_type", "number_of_batteries", "battery_weight", "battery_weight_unit_of_measure", "number_of_lithium_ion_cells", "number_of_lithium_metal_cells", "lithium_battery_packaging",
              "lithium_battery_energy_content", "lithium_battery_energy_content_unit_of_measure", "lithium_battery_weight", "lithium_battery_weight_unit_of_measure", "supplier_declared_dg_hz_regulation1", "supplier_declared_dg_hz_regulation2", "supplier_declared_dg_hz_regulation3",
              "supplier_declared_dg_hz_regulation4", "supplier_declared_dg_hz_regulation5", "hazmat_united_nations_regulatory_id", "safety_data_sheet_url", "item_weight", "item_weight_unit_of_measure",
              "item_volume", "item_volume_unit_of_measure", "flash_point", "ghs_classification_class1", "ghs_classification_class2", "ghs_classification_class3", "california_proposition_65_compliance_type", "california_proposition_65_chemical_names1",
              "california_proposition_65_chemical_names2",
              "california_proposition_65_chemical_names3", "california_proposition_65_chemical_names4", "california_proposition_65_chemical_names5"]
    gen_this_file_list = []
    for sku in skus:
        val_list = len(header) * [""]
        val_list[0] = sku
        val_list[8] = 'x'  # 'add-delete' column: 'x' = delete the listing
        gen_this_file_list.append(val_list)
    dest_folder = create_folder(shop)
    for i in range(0, len(gen_this_file_list), 19998):
        this_write = gen_this_file_list[i:i + 19998]
        this_write.insert(0, header)
        write_2d_list_to_txt(f"{dest_folder}{os.path.sep}del_{i}.txt", this_write)

today = datetime(2025, 4, 23)
formatted_today = today.strftime("%y%m%d")
date_pattern = r"(\d{6})"
# Counts how many SKUs share the same prefix (the SKU with its date
# segment removed), so every duplicate gets the next calendar day.
prefix_date_counter = {}


def replace_date(string):
    """Replace a SKU's date segment (second-to-last '-' part) with a fresh date.

    The first SKU sharing a prefix receives `today`; each subsequent one
    gets the following day, keeping regenerated SKUs unique. Strings with
    fewer than three '-' parts are returned unchanged.
    """
    segments = string.split('-')
    if len(segments) < 3:
        # Not in the expected <prefix>-<date>-<suffix> shape.
        return string
    # The counter key is the SKU minus its date segment.
    counter_key = '-'.join(segments[:-2] + segments[-1:])
    day_offset = prefix_date_counter.get(counter_key, 0)
    prefix_date_counter[counter_key] = day_offset + 1
    segments[-2] = (today + timedelta(days=day_offset)).strftime("%y%m%d")
    return '-'.join(segments)

def create_new_sku_to_record(origin_shop, dest_shop, brand_zip, brand, opt_name):
    """Clone follow-sell records from *origin_shop* into *dest_shop*.

    Each cloned SKU has its old operator suffix swapped for *opt_name*
    and its date segment refreshed via replace_date(), then the rows are
    batch-inserted into amazon_us_record.
    """
    source_rows = db_list_by_page("amazon_us_record","sku,basic_id,basic_platform,basic_sku,price,quantity,class_name,product_type",f"class_name not in ('Automotive','Media products - books DVD Music software video','Jewelry','Shoes','Clothes & Decoration') and basic_id in (select id from basic_product where status = 1 and published =1) and shop='{origin_shop}' and brand = '{brand}'", AmazonUsRecord,1,999999)
    create_time = '2025-04-09 00:00:00'
    inserts = []
    for record in source_rows:
        renamed = record.sku
        for old_opt in ("-OSQ", "-CHB", "-ZCY"):
            renamed = renamed.replace(old_opt, f"-{opt_name}")
        fresh_sku = replace_date(renamed)
        inserts.append([1, brand, brand_zip, dest_shop, opt_name, fresh_sku,
                        record.basic_id, record.basic_sku, record.basic_platform,
                        record.price, record.quantity, record.class_name,
                        record.product_type, create_time, create_time,
                        f'follow sell from {origin_shop}'])
    db_batch_insert("amazon_us_record", ["status", "brand", "brand_zip", "shop", "opt_name", "sku", "basic_id", "basic_sku", "basic_platform", "price", "quantity", "class_name", "product_type", "create_time", "update_time", "remark"], inserts)


def create_new_sku_to_record_by_point_excel(origin_shop, dest_shop, brand_zip, brand, opt_name):
    """Follow-sell the specific records listed in a local JSON file from
    *origin_shop* into *dest_shop*.

    Unlike create_new_sku_to_record, only the operator column changes;
    the operator suffix embedded in the SKU itself is kept as-is and
    only the date segment is refreshed.
    """
    need_follow_record = load_json(r'C:\Users\hunan\Desktop\YUANTINGRUI2Xboun.json')
    quoted_skus = [f'"{data[1]}"' for data in need_follow_record.values()]
    source_rows = db_list_by_page("amazon_us_record", "sku,basic_id,basic_platform,basic_sku,price,quantity,class_name,product_type",
                                  f"sku in ({','.join(quoted_skus)}) and shop = '{origin_shop}'", AmazonUsRecord,
                                  1, 999999)
    create_time = '2025-04-09 00:00:00'
    inserts = []
    for record in source_rows:
        inserts.append([1, brand, brand_zip, dest_shop, opt_name, replace_date(record.sku),
                        record.basic_id, record.basic_sku, record.basic_platform,
                        record.price, record.quantity, record.class_name,
                        record.product_type, create_time, create_time,
                        f'follow sell from {origin_shop}'])
    db_batch_insert("amazon_us_record", ["status", "brand", "brand_zip", "shop", "opt_name", "sku", "basic_id", "basic_sku", "basic_platform", "price", "quantity", "class_name", "product_type", "create_time", "update_time", "remark"], inserts)



def del_record_point(file_path, shop):
    """Delete the SKUs listed in the JSON at *file_path* from
    amazon_us_record and its five shard tables, 10000 SKUs per batch."""
    record_skus = load_json(file_path)
    target_tables = ["amazon_us_record"] + [f"amazon_us_record{i}" for i in range(1, 6)]
    for start in range(0, len(record_skus), 10000):
        print(start)
        quoted = ",".join(f'"{sku}"' for sku in record_skus[start:start + 10000])
        condition = f"sku in ({quoted}) and shop = '{shop}'"
        for table in target_tables:
            db_delete(table, condition)





# Ad-hoc entry point: comment/uncomment the one-off maintenance task to run.
if __name__ == '__main__':
    #create_new_sku_to_record_by_point_excel("YUANTINGRUI","Xboun","SF","Strawberry Fairy","FAQ")
    # get_point_date_sku_to_del_file("HELIN")
    # for shop in ["ZHOUZHENXI", "HELIN", "Wangkui", "lixiaoli", "Xboun", "UdorichUS", "Pengxiaohu_LJLB", "SVRCK", "ROVSHENYUSGJO_Inc", "KK_B11_Jason", "ZLDTECH", "Forio"]:
    #     gen_txt_file_to_update_list_price(shop)
    del_record_point(r'C:\Users\hunan\Desktop\Forio_del.json',"1_Forio")
    pass
