import json
import os
import requests
from tqdm import tqdm
import time
import re
import threading
import concurrent.futures

# Paths for the image output directory and the source JSON data file
IMAGES_BASE_DIR = 'static/images'
# Source data file (the Chinese name means "latest processed data")
JSON_FILE_PATH = '最新处理好的数据.json'
# Plain-text record of skuids whose images finished downloading (resume support)
DOWNLOADED_RECORD_FILE = 'downloaded_skuids.txt'
MAX_WORKERS = 10  # Maximum thread-pool size for parallel image downloads

def sanitize_filename(filename):
    """Normalize a candidate file name into a safe, non-empty name.

    Path separators and characters illegal on common filesystems are
    replaced with underscores, runs of underscores are collapsed,
    leading/trailing underscores and dots are trimmed, and the total
    length is capped at 255 characters while preserving the extension.
    Returns "" for falsy input.
    """
    if not filename:
        return ""
    # Neutralize path separators first so no directory component survives.
    cleaned = filename.replace('/', '_').replace('\\', '_')
    # Replace characters that are illegal on Windows/POSIX filesystems.
    cleaned = re.sub(r'[<>:\"/\\|?*\x00-\x1F]', '_', cleaned)
    # Collapse underscore runs, then trim edge underscores and dots.
    cleaned = re.sub(r'_+', '_', cleaned).strip('_.')
    if not cleaned:
        # Everything was stripped away; fall back to a generic name.
        cleaned = "downloaded_image"

    # Enforce the common 255-character limit, truncating the stem so the
    # extension is kept intact.
    stem, ext = os.path.splitext(cleaned)
    max_stem_len = 255 - len(ext)
    if len(stem) > max_stem_len:
        stem = stem[:max_stem_len]
    return stem + ext

def download_image(url, save_path, retries=3, delay=5, timeout=30):
    """Download a single image to save_path, retrying on failure.

    Args:
        url: HTTP(S) URL of the image.
        save_path: Destination file path.
        retries: Total number of attempts before giving up.
        delay: Seconds to sleep between attempts.
        timeout: Per-request timeout in seconds.

    Returns:
        True if the file was fully written, False otherwise.
    """
    for attempt in range(retries):
        try:
            response = requests.get(url, stream=True, timeout=timeout)
            response.raise_for_status()
            with open(save_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
            return True
        # Timeout is a subclass of RequestException, so one handler covers
        # both of the original (identical) branches.
        except requests.exceptions.RequestException:
            # Remove any partially written file: the resume logic treats any
            # non-empty file as complete, so a truncated download left on
            # disk would never be retried.
            try:
                if os.path.exists(save_path):
                    os.remove(save_path)
            except OSError:
                pass
            if attempt < retries - 1:
                time.sleep(delay)
    return False

def find_skuid_and_images(data, path=""):
    """Recursively collect every node carrying both a 'skuid' and a
    non-empty '详情页图片' (detail-page images) list.

    Args:
        data: Arbitrarily nested dict/list structure parsed from JSON.
        path: Dotted/bracketed location string built up during descent
            (kept for traceability; not included in the results).

    Returns:
        A list of {'skuid': ..., 'images': [...]} dicts in discovery order.
    """
    found = []

    if isinstance(data, dict):
        sku = data.get('skuid')
        pics = data.get('详情页图片', [])
        # Record this node only when both pieces are present and non-empty.
        if sku and isinstance(pics, list) and pics:
            found.append({'skuid': sku, 'images': pics})
        # Descend into every value, extending the location path.
        for key, value in data.items():
            child_path = f"{path}.{key}" if path else key
            found.extend(find_skuid_and_images(value, child_path))
    elif isinstance(data, list):
        for idx, element in enumerate(data):
            found.extend(find_skuid_and_images(element, f"{path}[{idx}]"))

    return found

def load_downloaded_skuids(record_file=None):
    """Load the set of skuids that have already been fully downloaded.

    Args:
        record_file: Path of the record file (one skuid per line). Defaults
            to DOWNLOADED_RECORD_FILE when None, preserving the original
            call signature.

    Returns:
        A set of skuid strings; empty when the file is missing or unreadable.
    """
    if record_file is None:
        record_file = DOWNLOADED_RECORD_FILE
    downloaded_skuids = set()
    if os.path.exists(record_file):
        try:
            with open(record_file, 'r', encoding='utf-8') as f:
                # One skuid per line; skip blank lines.
                for line in f:
                    skuid = line.strip()
                    if skuid:
                        downloaded_skuids.add(skuid)
            print(f"已加载 {len(downloaded_skuids)} 个已下载商品记录")
        except Exception as e:
            print(f"读取已下载记录文件时出错: {e}")
    return downloaded_skuids

def save_downloaded_skuid(skuid, record_file=None):
    """Append a skuid to the downloaded-record file.

    Args:
        skuid: The product skuid to record.
        record_file: Path of the record file. Defaults to
            DOWNLOADED_RECORD_FILE when None, preserving the original
            call signature.
    """
    if record_file is None:
        record_file = DOWNLOADED_RECORD_FILE
    try:
        # Append mode so records accumulate across runs.
        with open(record_file, 'a', encoding='utf-8') as f:
            f.write(f"{skuid}\n")
    except Exception as e:
        print(f"保存已下载记录时出错: {e}")

def process_image_task(task_data):
    """Run one image download task described by {'url', 'save_path'}.

    Returns True when the target file already exists non-empty (treated
    as downloaded) or when the download succeeds; False otherwise.
    """
    target = task_data['save_path']
    source_url = task_data['url']

    # A non-empty file on disk counts as already downloaded — skip it.
    if os.path.exists(target) and os.path.getsize(target) > 0:
        return True

    return download_image(source_url, target)

def _extract_image_url(img_data):
    """Return an http(s) URL from one image entry (str or dict), or None."""
    if isinstance(img_data, str):
        candidate = img_data
    elif isinstance(img_data, dict):
        # Dict entries: take the first string value that looks like a URL.
        candidate = next(
            (v for v in img_data.values()
             if isinstance(v, str) and v.startswith(('http://', 'https://'))),
            None,
        )
    else:
        return None
    if isinstance(candidate, str) and candidate.startswith(('http://', 'https://')):
        return candidate
    return None


def _infer_image_filename(img_url, index):
    """Derive a safe local file name for img_url.

    index is the 0-based position in the image list, used for synthesized
    fallback names like 'image_1.jpg'.
    """
    try:
        file_name = os.path.basename(img_url.split('?')[0])
        if not file_name or '.' not in file_name:
            # No usable basename in the URL: synthesize one, guessing the
            # extension from the URL text (default .jpg). Order matches the
            # original if/elif chain; '.jpeg' is normalized to '.jpg'.
            lowered = img_url.lower()
            ext = ".jpg"
            for candidate in (".jpg", ".jpeg", ".png", ".gif", ".webp", ".avif"):
                if candidate in lowered:
                    ext = ".jpg" if candidate == ".jpeg" else candidate
                    break
            file_name = f"image_{index + 1}{ext}"
        file_name = sanitize_filename(file_name)
        if not file_name:
            file_name = f"image_{index + 1}.jpg"
    except Exception:
        file_name = f"image_{index + 1}.jpg"
    return file_name


def download_item_images(item):
    """Download every detail image for one product in parallel.

    Files are written to IMAGES_BASE_DIR/<skuid>/, using a thread pool of
    up to MAX_WORKERS for the product's images.

    Args:
        item: {'skuid': ..., 'images': [...]} as produced by
            find_skuid_and_images.

    Returns:
        True only if every image task succeeded (or there was nothing
        usable to download).
    """
    skuid = item['skuid']
    images = item['images']

    # exist_ok=True avoids the exists()/makedirs() race when the directory
    # is created between the check and the call.
    item_dir = os.path.join(IMAGES_BASE_DIR, str(skuid))
    os.makedirs(item_dir, exist_ok=True)

    # Build one download task per usable image URL.
    download_tasks = []
    for i, img_data in enumerate(images):
        img_url = _extract_image_url(img_data)
        if not img_url:
            continue
        file_name = _infer_image_filename(img_url, i)
        download_tasks.append({
            'url': img_url,
            'save_path': os.path.join(item_dir, file_name),
        })

    # Nothing usable to download counts as success.
    if not download_tasks:
        return True

    # Fan the downloads out over a thread pool and count successes.
    with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        futures = [executor.submit(process_image_task, task) for task in download_tasks]
        success_count = sum(
            1 for future in concurrent.futures.as_completed(futures) if future.result()
        )

    # Complete only when every single image came through.
    return success_count == len(download_tasks)

def main():
    """Entry point: read the product JSON and download all new products' images."""
    # Make sure the root image directory exists before any downloads start.
    if not os.path.exists(IMAGES_BASE_DIR):
        os.makedirs(IMAGES_BASE_DIR)
        print(f"已创建文件夹: {IMAGES_BASE_DIR}")

    # Resume support: products recorded here are skipped entirely.
    downloaded_skuids = load_downloaded_skuids()

    # Parse the product data file; abort on any read/parse failure.
    try:
        with open(JSON_FILE_PATH, 'r', encoding='utf-8') as f:
            data = json.load(f)
        print(f"成功读取JSON文件: {JSON_FILE_PATH}")
    except Exception as e:
        print(f"读取JSON文件时发生错误: {e}")
        return

    # Locate every product node that has a skuid and detail images.
    items = find_skuid_and_images(data)
    if not items:
        print("未找到任何包含skuid和详情页图片的商品")
        return

    # Drop products already recorded as fully downloaded.
    new_items = [item for item in items if str(item['skuid']) not in downloaded_skuids]
    skipped_count = len(items) - len(new_items)
    print(f"找到 {len(items)} 个商品，其中 {skipped_count} 个已下载过，需要处理 {len(new_items)} 个")

    if not new_items:
        print("所有商品都已下载过，无需重复下载")
        return

    # Products are processed one at a time; each product's images are
    # downloaded in parallel inside download_item_images.
    completed_count = 0
    with tqdm(total=len(new_items), desc="商品下载进度", unit="个") as progress_bar:
        for item in new_items:
            if download_item_images(item):
                # Record only fully-downloaded products so partial products
                # are retried on the next run.
                save_downloaded_skuid(item['skuid'])
                completed_count += 1
            progress_bar.update(1)
            progress_bar.set_postfix(完成=f"{completed_count}/{len(new_items)}")

    print(f"\n下载完成，共处理 {len(new_items)} 个商品，成功完整下载 {completed_count} 个商品")

if __name__ == "__main__":
    main()