import csv
import json
import logging
import os
import re
import threading
import time
from datetime import datetime

import requests

from product_upload.domain.basic.basic_product import BasicProduct
from product_upload.util.basic.common_util import sleep_random_duration
from product_upload.util.basic.mysql_util import db_list_by_page, db_batch_insert, db_batch_update, db_get_one
from product_upload.util.basic.openai_util import model_request

logger = logging.getLogger(__name__)


def contains_non_ascii_unicode(text):
    """Return True if *text* contains at least one character outside the ASCII range (> U+007F)."""
    for ch in text:
        if ord(ch) > 127:
            return True
    return False


def download_csv(url, save_path, max_retries=3):
    """Download *url* to *save_path*, retrying failed requests with exponential backoff.

    Any pre-existing file at save_path is removed first and the parent
    directory is created if needed. Returns True on success, False once
    max_retries additional attempts have also failed.
    """
    delete_file(save_path)
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    attempt = 0
    while True:
        try:
            response = requests.get(url, timeout=120)
            response.raise_for_status()
        except requests.RequestException as e:
            logger.error(f"下载文件时出错: {e}")
            if attempt >= max_retries:
                return False
            # Back off 1s, 2s, 4s, ... before the next attempt.
            time.sleep(2 ** attempt)
            attempt += 1
            continue
        with open(save_path, 'wb') as file:
            file.write(response.content)
        return True


def delete_file(file_path):
    """Remove *file_path* from disk; a missing file is silently ignored."""
    if not os.path.exists(file_path):
        return
    os.remove(file_path)


def read_csv_to_2d_list(file_path):
    """Read a CSV file into a list of rows (each row a list of strings).

    Returns None when the file cannot be opened or parsed; callers must
    check for None before slicing the result.
    """
    try:
        with open(file_path, mode='r', newline='', encoding='utf-8') as file:
            # list(reader) replaces the manual append loop.
            return list(csv.reader(file))
    # Narrowed from a blanket `except Exception`: only the failures a CSV
    # read can actually produce are treated as "no data".
    except (OSError, UnicodeDecodeError, csv.Error) as e:
        logger.error(f"读取CSV文件时出错: {e}")
        return None


def ai_gen_size_and_package(product_info):
    """Ask the model to extract item and package dimensions from product text.

    Returns a dict with the six keys item_/package_ length/width/height
    (inches, per the prompt). Missing, non-numeric, or non-positive values
    are normalized to 1 so downstream size strings are always well-formed.
    """
    sys_prompt = """
    I will provide you with product information, please help me extract the product length, width, and height (item_) and packaging length, width, and height (package_) of the product information.If you are not confident enough about the length, width, and height of the packaging, please use the product's length, width, and height
Note: All dimensions are in inches, and the total quantity is in pounds.
Strictly return in JSON format,No need for explanation, and no other irrelevant information. for example:
{"item_length":1,"item_width":1,"item_height":1,"package_length":1,"package_width":1,"package_height":1}
    """
    user_prompt = f"product information:\n{product_info}"
    expected_keys = {"item_length", "item_width", "item_height", "package_length", "package_width", "package_height"}
    size_dict = model_request(True, user_prompt, sys_prompt, model="gpt-4o-mini")
    if set(size_dict.keys()) != expected_keys:
        # Wrong schema: pause briefly and give the model one more try.
        sleep_random_duration()
        size_dict = model_request(True, user_prompt, sys_prompt, model="gpt-4o-mini")
    # Normalize values. The original `v <= 0` raised TypeError when the model
    # returned a string or other non-numeric value; treat those as invalid too.
    for k, v in size_dict.items():
        if not isinstance(v, (int, float)) or v <= 0:
            size_dict[k] = 1
    # If the retry also returned a wrong schema, make sure every expected key exists.
    for key in expected_keys:
        size_dict.setdefault(key, 1)
    return size_dict


def filter_xl_table_data(html_content):
    """Strip HTML tags, non-ASCII characters, and Prop-65 boilerplate from feed HTML.

    Tags are replaced by newlines so adjacent cell texts stay separated;
    runs of blank lines are collapsed. Returns "" for empty/None input.
    """
    if not html_content:
        return ""
    # Replace every HTML tag with a newline to keep cell texts on separate lines.
    text = re.sub(r'<[^>]+>', '\n', html_content)
    # Remove all non-ASCII characters.
    text = re.sub(r'[^\x00-\x7F]+', '', text)
    # Drop California Proposition 65 boilerplate fragments.
    text = text.replace("WARNING:", "").replace("California Proposition 65", "").replace("California's Proposition 65", "")
    # Collapse any run of newlines into one. The original chained
    # .replace("\n\n\n", "\n").replace("\n\n", "\n") could still leave double
    # newlines behind (e.g. a run of five newlines reduced only to two).
    text = re.sub(r'\n{2,}', '\n', text)
    return text.strip()


def combination_product_info(title, description, properties, weight, number_of_packages, parcel_or_pallet, gender, diameter, size, product_volume):
    """Assemble a newline-separated plain-text product summary for the AI prompt.

    The title is always included; every other field is appended only when
    present (and, for diameter, only when it does not start with "0").
    Properties HTML is cleaned via filter_xl_table_data before inclusion.
    """
    parts = [f"title:{title}"]
    if weight:
        parts.append(f"weight:{weight} lb")
    if number_of_packages and len(number_of_packages) > 0:
        parts.append(f"number_of_packages:{number_of_packages}")
    if parcel_or_pallet and len(parcel_or_pallet) > 0:
        parts.append(f"parcel_or_pallet:{parcel_or_pallet}")
    if gender and len(gender) > 0:
        parts.append(f"gender:{gender}")
    if diameter and len(diameter) > 0 and not diameter.startswith("0"):
        parts.append(f"diameter:{diameter}")
    if size and len(size) > 0:
        parts.append(f"size:{size}")
    if product_volume and len(product_volume) > 0:
        parts.append(f"product_volume:{product_volume} m³")
    if properties and len(properties) > 0:
        parts.append(f"properties:\n{filter_xl_table_data(properties)}")
    if description and len(description) > 0:
        parts.append(f"description:\n{description}")
    return "\n".join(parts)


def parse_csv_and_flush_db(xl_download_csv_link=r"https://transport.productsup.io/d88ebe68f8d10ecf2704/channel/188219/vidaXL_us_dropshipping.csv", save_file_path=f'{os.path.join(os.path.expanduser("~"), 'Desktop')}\\tmp\\xl.csv'):
    """Download the vidaXL dropshipping feed CSV and sync it into the database.

    Existing SKUs get price/quantity refreshed (and their images republished),
    unseen SKUs are inserted with AI-generated size/package data, and SKUs
    absent from the feed (or priced <= 0) are unpublished. Rows with long
    estimated delivery times are additionally set to status 8 with a remark.

    NOTE(review): the save_file_path default nests single quotes inside a
    single-quoted f-string (requires Python 3.12+) and hard-codes backslashes
    (Windows-only path) — confirm the deployment target.
    """
    logger.info('start: csv vidaXL quantity,price,new product.')
    # NOTE(review): download_csv's return value is ignored — if the download
    # fails, read_csv_to_2d_list returns None and the code below will raise.
    download_csv(xl_download_csv_link, save_file_path)
    create_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    data_list = read_csv_to_2d_list(save_file_path)
    # NOTE(review): this includes the CSV header row's column-name cell as a
    # "SKU"; the per-row loop below skips the header, so it only affects the
    # set arithmetic — verify it cannot collide with a real SKU.
    xl_sku_list = [x[1] for x in data_list]
    db_product_list = db_list_by_page("basic_product", "id,sku", 'platform="XL"', BasicProduct, 1, 9999999)
    db_sku_list = [x.sku for x in db_product_list]
    db_sku_id_map = {x.sku: x.id for x in db_product_list}
    # Partition: in both feed and DB -> update; feed only -> insert; DB only -> unpublish.
    update_product_skus = set(xl_sku_list).intersection(set(db_sku_list))
    new_product_skus = set(xl_sku_list).difference(set(db_sku_list))
    remove_product_skus = set(db_sku_list).difference(set(xl_sku_list))
    need_update_quantity_price = []
    need_update_image_published = []
    need_add_product = []
    need_offline_12_skus =[]
    need_offline_60_skus = []
    # Skip the header row; columns are referenced by fixed index below.
    for row in data_list[1:]:
        sku = row[1]
        price = float(row[5])
        quantity = int(row[6].replace(",", ""))
        # Estimated total delivery time in days (column 35; blank -> 0).
        estimated_total_delivery_time = int(row[35]) if row[35] else 0
        # Flag known products whose shipping takes too long (status 8 + remark).
        if db_sku_id_map.get(sku, None) and 12 <= estimated_total_delivery_time < 60:
            need_offline_12_skus.append([db_sku_id_map[sku], 8, f"运输时间过长:{estimated_total_delivery_time}天"])
        elif db_sku_id_map.get(sku, None) and estimated_total_delivery_time >= 60:
                need_offline_60_skus.append([db_sku_id_map[sku], 8, f"荷兰仓"])
        # Delisted: non-positive price (or already slated for removal) — move
        # the SKU into the unpublish set and skip the rest of the row.
        if price <= 0 or sku in remove_product_skus:
            if sku in update_product_skus:
                update_product_skus.remove(sku)
            elif sku in new_product_skus:
                new_product_skus.remove(sku)
            remove_product_skus.add(sku)
            continue
        # Existing product: refresh price/quantity/link and republish its images.
        if sku in update_product_skus:
            need_update_quantity_price.append([db_sku_id_map[sku], 1, quantity, price, row[0]])
            need_update_image_published.append([db_sku_id_map[sku], 1])
            continue
        # New product: build the full insert row.
        if sku in new_product_skus:
            # Guard against a row already inserted by a concurrent/previous run.
            xl_one = db_get_one("basic_product", f'platform="XL" and sku ="{sku}"', BasicProduct)
            if xl_one:
                continue
            platform = "XL"
            link = row[0]
            title = row[2].replace('vidaxl', '').replace('vidaXL', '').strip()
            description = row[7]
            properties = row[9]
            weight = round(float(row[10].replace(",", ".")), 2)
            # Image URL columns 11-21; skip rows without any.
            image_list = row[11:22]
            if image_list is None or len(image_list) == 0:
                continue
            number_of_packages = row[24]
            parcel_or_pallet = row[25]
            gender = row[27]
            diameter = row[28]
            size = row[29]
            product_volume = row[33].replace(",", ".")
            _data = {"platform": platform, "link": link, "sku": sku, "title": title, "price": price, "quantity": quantity, "description": description, "properties": properties, "weight": weight, "image_list": image_list,
                     "number_of_packages": number_of_packages, "parcel_or_pallet": parcel_or_pallet,
                     "gender": gender, "diameter": diameter, "size": size, "product_volume": product_volume}
            product_info = combination_product_info(_data.get("title"), _data.get("description"), _data.get("properties"), _data.get("weight"), _data.get("number_of_packages"), _data.get("parcel_or_pallet"), _data.get("gender"),
                                                    _data.get("diameter"), _data.get("size"), _data.get("product_volume"))
            # AI-extracted item/package dimensions; weight appended as 4th element.
            size_dict = ai_gen_size_and_package(product_info)
            packages = [size_dict.get("package_length", 1), size_dict.get("package_width", 1), size_dict.get("package_height", 1), weight]
            dimensions = [size_dict.get("item_length", 1), size_dict.get("item_width", 1), size_dict.get("item_height", 1), weight]
            dimensions_str = f'{dimensions[0]} * {dimensions[1]} * {dimensions[2]} * {dimensions[3]}'
            packages_str = f'{packages[0]} * {packages[1]} * {packages[2]} * {packages[3]}'
            json_text = {"sku": sku, "title": title, "published": 1, "not_available": [], "packages": packages, "dimensions": dimensions, "main_image": image_list[0], "images_list": image_list, "price": price, "quantity": quantity}
            # Row order must match field_list below (status=0, published=1, ...).
            need_add_product.append([0, 1, "", platform, sku, link, image_list[0], title, dimensions_str, packages_str, price, quantity, json.dumps(json_text), product_info, create_time, "US", "000vidaXL000", "vidaXL"])
    field_list = ["status", "published", "not_available", "platform", "sku", "link", "main_image", "title", "dimensions", "packages", "price", "quantity", "json_text", "product_info", "create_time", "region", "supplier_code", "supplier_name"]
    if len(need_add_product) > 0:
        db_batch_insert("basic_product", field_list, need_add_product)
    db_batch_update("basic_product", ["id", "published", "quantity", "price", "link"], need_update_quantity_price)
    time.sleep(1)
    db_batch_update("basic_image", ["basic_id", "published"], need_update_image_published)
    # Unpublish removed XL products (and their images).
    if remove_product_skus:
        remove_product_skus = list(set(remove_product_skus))
        tmp_remove = [[db_sku_id_map[x], 0] for x in remove_product_skus]
        db_batch_update("basic_product", ["id", "published"], tmp_remove)
        time.sleep(1)
        db_batch_update("basic_image", ["basic_id", "published"], tmp_remove)
    if need_offline_12_skus:
        db_batch_update("basic_product", ["id", "status","remark"], need_offline_12_skus)
        db_batch_update("basic_image", ["basic_id", "status", "remark"], need_offline_12_skus)
    if need_offline_60_skus:
        db_batch_update("basic_product", ["id", "status","remark"], need_offline_60_skus)
        db_batch_update("basic_image", ["basic_id", "status", "remark"], need_offline_60_skus)
    logger.info(f'end: update count:{len(need_update_quantity_price)},new count:{len(need_add_product)},offline count:{len(remove_product_skus)}')


# 对已有的XL进行刷新数据入库
def local_hand_update_db_info():
    xl_download_csv_link = r"https://transport.productsup.io/d88ebe68f8d10ecf2704/channel/188219/vidaXL_us_dropshipping.csv"
    save_file_path = f'{os.path.join(os.path.expanduser("~"), 'Desktop')}\\tmp\\xl.csv'
    download_csv(xl_download_csv_link, save_file_path)
    data_list = read_csv_to_2d_list(save_file_path)[1:]
    xl_sku_list = [x[1] for x in data_list]
    db_sku_tuple = db_list_by_page("basic_product", "id,sku", 'json_text is null platform ="XL"', None, 1, 1000000)
    db_sku_list = [x[1] for x in db_sku_tuple]
    db_sku_id_map = {x[1]: x[0] for x in db_sku_tuple}
    opt_sku_list = set(xl_sku_list).intersection(set(db_sku_list))
    chunk_size = 90
    for i in range(0, len(data_list), chunk_size):
        _tmp_data = data_list[i:i + chunk_size]
        sleep_random_duration(3, 5)
        threading.Thread(target=thread_local, args=(_tmp_data, db_sku_id_map, opt_sku_list)).start()


def thread_local(data_list, db_sku_id_map, opt_sku_list):
    """Worker for local_hand_update_db_info.

    For each CSV row whose SKU is in opt_sku_list, rebuild the product info,
    ask the model for size/package dimensions, and immediately update the
    corresponding basic_product row (one DB update per row so progress
    survives a crash mid-chunk).
    """
    # Hoisted out of the loop: the column list never changes per row.
    field_list = ["id", "status", "published", "not_available", "platform", "sku", "link", "main_image", "title", "dimensions", "packages", "price", "quantity", "json_text", "product_info"]
    for row in data_list:
        sku = row[1]
        if sku not in opt_sku_list:
            continue
        price = float(row[5])
        quantity = int(row[6].replace(",", ""))
        platform = "XL"
        link = row[0]
        # Strip brand noise in both casings, matching parse_csv_and_flush_db
        # (the original only removed 'vidaXL' here).
        title = row[2].replace('vidaxl', '').replace('vidaXL', '').strip()
        description = row[7]
        properties = row[9]
        weight = round(float(row[10].replace(",", ".")), 2)
        # Image URL columns 11-21; skip rows without any.
        image_list = row[11:22]
        if not image_list:
            continue
        number_of_packages = row[24]
        parcel_or_pallet = row[25]
        gender = row[27]
        diameter = row[28]
        size = row[29]
        product_volume = row[33].replace(",", ".")
        product_info = combination_product_info(title, description, properties, weight, number_of_packages, parcel_or_pallet, gender,
                                                diameter, size, product_volume)
        # AI-extracted item/package dimensions; weight appended as 4th element.
        size_dict = ai_gen_size_and_package(product_info)
        packages = [size_dict.get("package_length", 1), size_dict.get("package_width", 1), size_dict.get("package_height", 1), weight]
        dimensions = [size_dict.get("item_length", 1), size_dict.get("item_width", 1), size_dict.get("item_height", 1), weight]
        dimensions_str = f'{dimensions[0]} * {dimensions[1]} * {dimensions[2]} * {dimensions[3]}'
        packages_str = f'{packages[0]} * {packages[1]} * {packages[2]} * {packages[3]}'
        json_text = {"sku": sku, "title": title, "published": 1, "not_available": [], "packages": packages, "dimensions": dimensions, "main_image": image_list[0], "images_list": image_list, "price": price, "quantity": quantity}
        # Fixed the stray ", " after 包装: in the original log format.
        logger.info(f"{sku} 产品:{dimensions_str}, 包装:{packages_str}")
        # Row order must match field_list (status=1, published=1, not_available="[]").
        db_batch_update("basic_product", field_list,
                        [[db_sku_id_map[sku], 1, 1, "[]", platform, sku, link, image_list[0], title, dimensions_str, packages_str, price, quantity, json.dumps(json_text), product_info]])


if __name__ == '__main__':
    # Manual entry point: run the full feed sync.
    parse_csv_and_flush_db()
