import json
import mimetypes
import os
import threading
import time
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from urllib.parse import urlparse

import boto3
import requests
from botocore.exceptions import NoCredentialsError, ClientError

from product_upload.develop_third.aws_util import encrypt, aws_bucket_base_api
from product_upload.domain.basic.basic_image import BasicImage
from product_upload.domain.basic.basic_product import BasicProduct
from product_upload.domain.basic.basic_supplier import BasicSupplier
from product_upload.util.basic.common_util import sleep_random_duration, load_json
from product_upload.util.basic.mysql_util import db_list_by_page, db_batch_update, db_delete, db_get_count

lock = threading.Lock()


# Upload to S3 by downloading each image to a local file first.
def upload_fail_link_s3():
    """Re-upload every key->link pair from need_append_s3.json via local download.

    Starts one worker thread per image (staggered by 0.5s), joins all workers,
    then dumps the keys/links that still failed to two JSON files on the desktop.
    """
    bucket_key_link_dict = load_json(r"C:\Users\hunan\Desktop\need_append_s3.json")
    fail_key = []   # S3 keys whose upload failed (download succeeded)
    fail_link = []  # source links that could not even be downloaded
    threads = []
    for index_, (bucket_key_, link) in enumerate(bucket_key_link_dict.items(), start=1):
        thread_up = threading.Thread(target=thread_to_upload_by_local, args=(index_, bucket_key_, link, fail_key, fail_link))
        thread_up.start()
        threads.append(thread_up)
        time.sleep(0.5)  # stagger thread startup to avoid hammering the source site
    print("线程全部启动完毕")
    # Join the workers instead of the old fixed 10-minute sleep: the sleep gave
    # no guarantee all threads had finished before the failure lists were written.
    for thread_up in threads:
        thread_up.join()
    with open(r"C:\Users\hunan\Desktop\fail_key.json", 'w', encoding='utf-8') as json_file:
        json.dump(fail_key, json_file, indent=4, ensure_ascii=False)
    with open(r"C:\Users\hunan\Desktop\fail_link.json", 'w', encoding='utf-8') as json_file:
        json.dump(fail_link, json_file, indent=4, ensure_ascii=False)
    print("写入完毕,等着你手动停止了")
    # Deliberate: keep the process alive until it is stopped manually.
    time.sleep(60 * 60 * 24 * 4)


def thread_to_upload_by_local(index, bucket_key_, link, fail_key, fail_link):
    """Worker: download *link* to a temp file and upload it to S3 under *bucket_key_*.

    Failures are appended to the shared fail_link (download failed) or
    fail_key (upload failed) lists while holding the module-level lock.
    """
    download_started = time.time()
    local_path = download_image(index, link)
    download_elapsed = time.time() - download_started
    if not local_path:
        # Could not fetch the source image at all.
        with lock:
            fail_link.append(link)
        return
    upload_started = time.time()
    uploaded_key = upload_by_local_file(bucket_key_, local_path)
    upload_elapsed = time.time() - upload_started
    if uploaded_key:
        print(f'线程:{index},{bucket_key_},上传成功,下载耗:{download_elapsed},上传耗时:{upload_elapsed}')
    else:
        with lock:
            fail_key.append(bucket_key_)


def download_image(index, url):
    """Download *url* into ~/Desktop/tmp/origin_img and return the local file path.

    Retries up to 3 times (2s pause between attempts). Returns None when all
    attempts fail.
    """
    desktop_path = os.path.expanduser("~/Desktop")
    save_dir = os.path.join(desktop_path, "tmp", "origin_img")
    os.makedirs(save_dir, exist_ok=True)
    max_retries = 3
    wait_time = 2
    # Derive the extension from the URL *path* only, so query strings
    # ("...photo.jpg?size=big") don't leak into the filename; fall back to
    # "jpg" for extension-less URLs. (The old url.split(".")[-1] produced
    # broken names in both cases.)
    suffix = os.path.splitext(urlparse(url).path)[1].lstrip(".") or "jpg"
    timestamp = str(uuid.uuid4()).replace('-', "")[:10] + datetime.now().strftime("%Y%m%d%H%M%S")
    file_path = os.path.join(save_dir, f"{timestamp}_{index}.{suffix}")
    for attempt in range(max_retries):
        try:
            response = requests.get(url, timeout=15)
            response.raise_for_status()
            with open(file_path, 'wb') as file:
                file.write(response.content)
            return file_path
        except requests.RequestException:
            if attempt < max_retries - 1:
                time.sleep(wait_time)
    return None


def delete_downloaded_file(file_path):
    """Best-effort removal of a downloaded temp file; errors are logged, never raised.

    The exists() check stays inside the try-block on purpose: callers may pass
    None (failed downloads), which makes os.path.exists raise.
    """
    try:
        if os.path.exists(file_path):
            os.remove(file_path)
    except Exception as err:
        print(f"delete img file {file_path} fail,{err}")


# Upload a file to S3 from an online link, via the lambda endpoint.
def upload_by_file_url(bucket_name, file_url, s3_bucket='hunan0424'):
    """Ask the lambda endpoint to fetch *file_url* and store it under *bucket_name*.

    Returns the key (*bucket_name*) on HTTP 200, otherwise None.
    """
    endpoint = "https://k1uhmrcgri.execute-api.us-east-2.amazonaws.com/upload_by_file_url"
    # Guess the MIME type from the last URL segment (the file name).
    guessed_type, _ = mimetypes.guess_type(file_url.split("/")[-1])
    payload = {
        "content_type": guessed_type,
        "url": file_url,
        "s3_key": bucket_name,
        "s3_bucket": s3_bucket,
        "token": "&%@*XHfbs_lucky_rmd_qulai_2025..*#@#df."
    }
    try:
        resp = requests.post(endpoint, json=payload, headers={"Content-Type": "application/json"}, timeout=15)
        resp.raise_for_status()
    except Exception:
        return None
    return bucket_name if resp.status_code == 200 else None


# Load S3 keys from a txt file.
def load_list_via_txt():
    """Read whitespace-separated S3 keys from keys.txt and dump the plausible ones to JSON.

    Entries of length <= 10 are discarded as noise.
    """
    collected = []
    with open('./../../keys.txt', 'r', encoding='utf-16') as source:
        for row in source:
            collected.extend(row.strip().split())
    plausible = [key for key in collected if len(key) > 10]
    with open(r"C:\Users\hunan\Desktop\bucket_all_keys.json", 'w', encoding='utf-8') as json_file:
        json.dump(plausible, json_file, indent=4, ensure_ascii=False)


# Remove keys already uploaded to S3; keep only the failed ones.
def remove_success_key():
    """Diff the full key->link map against the keys already in S3 and persist what remains.

    Only keys under images/xl/ with a non-trivial link (len > 2) are kept.
    """
    all_key_links = load_json(r"C:\Users\hunan\Desktop\bucket_name_link_json.json")
    uploaded = set(load_json(r"C:\Users\hunan\Desktop\xl_keys.json"))
    pending = {
        key: all_key_links[key]
        for key in set(all_key_links) - uploaded
        if key.startswith("images/xl/") and len(all_key_links[key]) > 2
    }
    with open(r"C:\Users\hunan\Desktop\need_append_s3.json", 'w', encoding='utf-8') as json_file:
        json.dump(pending, json_file, indent=4, ensure_ascii=False)


# Fetch all keys from the S3 bucket.
def list_s3_keys():
    """List every key under images/xl/ in the hunan0424 bucket and save them to xl_keys.json."""
    s3 = boto3.client('s3')
    bucket_name = 'hunan0424'
    prefix = 'images/xl/'
    all_keys = []
    # Use the built-in paginator instead of hand-rolling the
    # ContinuationToken loop — same results, less bookkeeping.
    paginator = s3.get_paginator('list_objects_v2')
    for page in paginator.paginate(Bucket=bucket_name, Prefix=prefix):
        # 'Contents' is absent on an empty page.
        all_keys.extend(obj['Key'] for obj in page.get('Contents', []))
    with open(r"C:\Users\hunan\Desktop\xl_keys.json", 'w', encoding='utf-8') as json_file:
        json.dump(all_keys, json_file, indent=4, ensure_ascii=False)


# Generate a JSON file mapping S3 keys to the original basic-product image URLs.
def gen_db_img_json():
    """Build a {s3_key: source_image_url} map for every 'XL' basic product and dump it to JSON.

    The S3 key encodes product id, image position (1-based) and an
    encrypted SKU, mirroring the naming used by the uploaders.
    """
    image_rows = db_list_by_page("basic_image", "basic_id", "platform = 'XL'", None, 1, 999999)
    id_list = [str(row[0]) for row in image_rows]
    product_rows = db_list_by_page("basic_product", "id,sku,json_text", f'id in ({",".join(id_list)})', None, 1, 99999999)
    bucket_name_link_json = {}
    for product_id, sku, json_text in product_rows:
        # Skip rows without a usable JSON payload.
        if json_text is None or len(json_text) <= 10:
            continue
        print(product_id, sku)
        for position, img_url in enumerate(json.loads(json_text).get('images_list', []), start=1):
            suffix = img_url.split(".")[-1]
            encrypted = encrypt(f'{sku}.{suffix}'.split(".")[0])
            s3_key = f'images/xl/{product_id}_{position}_{encrypted}.{suffix}'
            bucket_name_link_json[s3_key] = img_url
    with open(r"C:\Users\hunan\Desktop\bucket_name_link_json.json", 'w', encoding='utf-8') as json_file:
        json.dump(bucket_name_link_json, json_file, indent=4, ensure_ascii=False)


# The functions above are local, one-off test helpers.


# Upload to S3 from a local file.
def upload_by_local_file(bucket_key, local_file_path, s3_bucket='hunan0424'):
    """Upload *local_file_path* to S3 under *bucket_key*.

    Returns *bucket_key* on success, None on any failure (including a
    None path from a failed download).
    """
    if local_file_path is None:
        return None
    s3 = boto3.client('s3')
    try:
        mime_type, _ = mimetypes.guess_type(local_file_path)
        # guess_type returns None for unknown extensions, and boto3 rejects
        # ContentType=None in ExtraArgs — fall back to a generic binary type.
        extra_args = {'ContentType': mime_type or 'application/octet-stream'}
        s3.upload_file(local_file_path, s3_bucket, bucket_key, ExtraArgs=extra_args)
        return bucket_key
    except NoCredentialsError:
        print("凭证错误，请检查 AWS 访问密钥")
        return None
    except ClientError as e:
        # S3 client-side errors (permissions, missing bucket, ...).
        error_code = e.response['Error']['Code']
        print(f"S3 上传失败，错误码：{error_code}，错误信息：{e}")
        return None
    except Exception as e:
        # Anything else (e.g. unreadable local file).
        print(f"上传过程中发生错误：{e}")
        return None


# External entry point: uploads via online link, falling back to local file.
def aws_upload_img_to_s3(platform):
    """Mirror all pending images for *platform* into S3 using a 10-worker thread pool.

    Pages through basic_image rows whose main image is not yet on S3 (or that
    were flagged via remark) and hands each 100-row page to
    thread_upload_img_to_s3, which writes the S3 links back to the DB.
    """
    # Map the platform code to its S3 key prefix; unknown platforms fall back to images/xl/.
    bucket_prefix = 'images/xl/'
    if platform == "GG":
        bucket_prefix = 'images/gg/'
    elif platform == "DB":
        bucket_prefix = 'images/doba/'
    elif platform == "SY":
        bucket_prefix = 'images/sy/'
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = []
        for page in range(1, 9999999):
            # NOTE(review): the worker threads update the same rows this WHERE
            # clause matches, so the filtered result set shrinks while `page`
            # advances — rows can be skipped. Confirm whether re-querying page 1
            # (or snapshotting ids up front) is the intended behavior.
            db_basic_image_list = db_list_by_page("basic_image", ",".join(BasicImage.fields), f"platform='{platform}' and ((main_image !='' and published = 1 and status != 2 and  main_image not like 'https://hunan0424%' ) or (remark = '有未上传S3的图'))", BasicImage, page, 100)
            if not db_basic_image_list:
                break
            futures.append(executor.submit(thread_upload_img_to_s3, db_basic_image_list, bucket_prefix))
            sleep_random_duration()
        print(f"uploadS3: {platform},wait thread...")
        # Surface worker exceptions instead of losing them in the pool.
        for future in as_completed(futures):
            try:
                future.result()
            except Exception as e:
                print(f"uploadS3 fail: {e}")
        print(f"end: uploadS3,{platform}...")


def thread_upload_img_to_s3(db_basic_image_list, bucket_prefix):
    """Worker: mirror each row's image URLs into S3 and write the S3 links back.

    For each image: try the lambda URL-upload first, fall back to
    download-then-upload, and keep the original URL if both fail. The row's
    remark column is set when any image is still not on S3, so the row is
    picked up again by the caller's WHERE clause.
    """
    for basic_image in db_basic_image_list:
        # Main image plus the 15 auxiliary image slots, in DB column order.
        image_list = [basic_image.main_image, basic_image.other_image_url1, basic_image.other_image_url2, basic_image.other_image_url3, basic_image.other_image_url4, basic_image.other_image_url5,
                      basic_image.other_image_url6, basic_image.other_image_url7, basic_image.other_image_url8, basic_image.other_image_url9, basic_image.other_image_url10, basic_image.other_image_url11,
                      basic_image.other_image_url12, basic_image.other_image_url13, basic_image.other_image_url14, basic_image.other_image_url15]
        sku = basic_image.sku
        # Drop empty/placeholder slots.
        image_list = [x for x in image_list if x and len(x) > 5]
        if len(image_list) == 0:
            continue
        s3_link = []
        for index, image_url in enumerate(image_list):
            # Already uploaded to S3 previously: keep the link as-is and skip.
            if image_url.startswith("https://hunan0424"):
                s3_link.append(image_url)
                continue
            encrypt_str = encrypt(sku)
            bucket_name_origin = f'{bucket_prefix}{basic_image.basic_id}_{index + 1}_{encrypt_str}.jpg'
            bucket_name = upload_by_file_url(bucket_name_origin, image_url)
            if not bucket_name:
                # Lambda upload failed: download locally and upload directly.
                local_file = download_image(index, image_url)
                bucket_name = upload_by_local_file(bucket_name_origin, local_file)
                delete_downloaded_file(local_file)
            if bucket_name:
                s3_link.append(aws_bucket_base_api + bucket_name)
            else:
                # Both paths failed: keep the original URL so nothing is lost.
                s3_link.append(image_url)
        tmp_list = [basic_image.id]
        # Pad to exactly 16 link slots to match the update column list below.
        s3_link = s3_link + (16 - len(s3_link)) * ['']
        tmp_list.extend(s3_link)
        # tmp_list[1:] are the 16 link slots; empty pads are skipped by `if url`,
        # so remark flags rows where any real link is still not on S3.
        remark = "" if all(url.startswith("https://hunan0424") for url in tmp_list[1:] if url) else "有未上传S3的图"
        tmp_list.append(remark)
        db_batch_update("basic_image", ["id", "main_image", "other_image_url1", "other_image_url2", "other_image_url3", "other_image_url4", "other_image_url5", "other_image_url6", "other_image_url7"
            , "other_image_url8", "other_image_url9", "other_image_url10", "other_image_url11", "other_image_url12", "other_image_url13", "other_image_url14", "other_image_url15","remark"], [tmp_list])


def batch_delete_s3(object_keys):
    """Delete *object_keys* from the hunan0424 bucket in chunks of 1000.

    1000 is the hard limit of the S3 DeleteObjects API; each chunk is one call.
    """
    client = boto3.client('s3')
    bucket_name = 'hunan0424'
    chunk_size = 1000
    for start in range(0, len(object_keys), chunk_size):
        current_chunk = object_keys[start:start + chunk_size]
        result = client.delete_objects(
            Bucket=bucket_name,
            Delete={'Objects': [{'Key': key} for key in current_chunk]})
        print(f"Deleted {len(current_chunk)} objects from {bucket_name}")
        print(result)

def tmp_def_invalid():
    """One-off cleanup: purge non-GG 'wallart' products that never reached
    amazon_us_record, together with their related DB rows and S3 images.
    """
    for page in range(1, 9999):
        # NOTE(review): this always queries page 1 and leaves `page` unused —
        # presumably deliberate, since the matched rows are deleted at the end
        # of each pass so "page 1" is always the next unprocessed slice.
        # Confirm; if a delete fails, this loop would re-fetch the same rows.
        db_list = db_list_by_page("basic_product", "id,json_text", "platform != 'GG' and id not in (select DISTINCT(basic_id) from amazon_us_record) and product_type ='wallart'", BasicProduct, 1, 10000)
        if not db_list:
            break
        ids = [str(x.id) for x in db_list]
        print(len(ids))
        s3_keys = []
        for product in db_list:
            if product.json_text:
                json_text = json.loads(product.json_text)
                if json_text.get("images_list"):
                    for image in json_text.get("images_list"):
                        if image:
                            # Strip query string, then the bucket URL prefix, leaving the bare S3 key.
                            img = image.split("?")[0]
                            s3_keys.append(img.replace("https://hunan0424.s3.us-east-2.amazonaws.com/", ""))
        if ids:
            # Remove the product and all dependent rows.
            db_delete("basic_image", f"basic_id in ({','.join(ids)})")
            db_delete("amazon_us_tag", f"basic_id in ({','.join(ids)})")
            db_delete("amazon_us_content", f"basic_id in ({','.join(ids)})")
            db_delete("basic_product", f"id in ({','.join(ids)})")
        if s3_keys:
            batch_delete_s3(s3_keys)

if __name__ == '__main__':
    # Ad-hoc script entry: currently runs the one-off invalid-product cleanup.
    tmp_def_invalid()
    pass
