import requests
import json
import time
import requests
import os
import time
from urllib.parse import urlparse
from concurrent.futures import ThreadPoolExecutor, as_completed


def scrape_infinite_scroll_api(base_api_url, headers=None, limit=20, delay=1.0):
    """
    Page through an infinite-scroll style JSON API and collect every item.

    Repeatedly requests *base_api_url* with an increasing ``start`` offset
    until the API returns an empty page.  Each page's cover images are
    downloaded as a side effect via ``download_images_batch``.

    Args:
        base_api_url (str): JSON endpoint to page through.
        headers (dict | None): optional HTTP headers (e.g. User-Agent).
        limit (int): number of items requested per page (default 20,
            matching the original hard-coded page size).
        delay (float): seconds to sleep between page requests.

    Returns:
        list: all items accumulated across every page.
    """
    all_data = []
    page = 1
    start = 0

    while True:
        # Offset-based pagination; 'type' / 'interval_id' are the chart
        # API's fixed filters — presumably the Douban chart endpoint
        # (TODO confirm against the caller).
        params = {
            'type': 30,
            'interval_id': '100:90',
            'start': start,
            'limit': limit,
        }

        try:
            # timeout prevents a stalled server from hanging the loop forever
            response = requests.get(base_api_url, params=params,
                                    headers=headers, timeout=30)
            response.raise_for_status()

            # This endpoint returns a JSON list directly; an empty list
            # means we have paged past the last item.
            items = response.json()
            if not items:
                print(f"没有更多数据，停止在第{page}页")
                break

            download_images_batch(items)
            all_data.extend(items)
            print(f"已获取第{page}页，共{len(items)}条数据")

            page += 1
            # Advance by the actual page size instead of a hard-coded 20,
            # so a non-default `limit` cannot skip or re-fetch items.
            start += limit
            time.sleep(delay)  # be polite: avoid hammering the API

        except requests.RequestException as e:
            print(f"请求第{page}页时出错: {e}")
            break
        except json.JSONDecodeError as e:
            print(f"解析第{page}页JSON时出错: {e}")
            break

    return all_data


def download_images_batch(items, save_path="./images", max_workers=5):
    """
    Download cover images for a batch of items concurrently.

    Args:
        items (list[dict]): items to download; each is passed to
            ``download_single_image`` and is expected to carry a
            'cover_url' and 'title' key.
        save_path (str): directory to save images into.
        max_workers (int): maximum number of concurrent downloads.

    Returns:
        list[dict]: one result dict per item, each with a "url", a
        "status" of "success"/"failed", and a "path" or "error" field.
    """
    results = []

    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Submit every download up front; map futures back to their item
        # so failures can still be attributed to a URL.
        future_to_item = {
            executor.submit(download_single_image, item, save_path): item
            for item in items
        }

        # Collect results as they complete, in completion order.
        for future in as_completed(future_to_item):
            try:
                result = future.result()
            except Exception as e:
                # A worker normally catches its own errors, but don't let
                # one unexpected failure abort the remaining results.
                item = future_to_item[future]
                result = {"url": item.get('cover_url'),
                          "status": "failed", "error": str(e)}
            results.append(result)
            if result["status"] == "success":
                print(f"✓ 成功下载: {result['url']}")
            else:
                print(f"✗ 下载失败: {result['url']} - {result['error']}")

    return results


def download_single_image(item, save_path="./images", timeout=30):
    """
    Download one cover image described by *item*.

    Args:
        item (dict): expects 'cover_url' (image URL) and 'title'
            (preferred base file name).
        save_path (str): directory to save into (created on demand).
        timeout (int): per-request timeout in seconds.

    Returns:
        dict: {"url", "status": "success", "path"} on success, or
              {"url", "status": "failed", "error"} on any failure.
    """
    try:
        response = requests.get(item['cover_url'], timeout=timeout)
        response.raise_for_status()

        # Create the target directory lazily, only once a download succeeds.
        os.makedirs(save_path, exist_ok=True)

        # Use the title as the base file name, replacing characters that
        # are illegal in file names on common platforms.
        illegal = '\\/:*?"<>|'
        title = (item.get('title') or '').strip()
        base = ''.join('_' if c in illegal else c for c in title)

        # If the title is unusable, fall back to the URL's base name so
        # distinct images don't all collide on one placeholder name
        # (the old code wrote every such item to "image_(unknown).jpg").
        if not base:
            url_name = os.path.basename(urlparse(item['cover_url']).path)
            base = os.path.splitext(url_name)[0] or 'image'

        # Pick an extension: prefer the one in the URL path, then the
        # Content-Type header, defaulting to .jpg.
        ext = os.path.splitext(urlparse(item['cover_url']).path)[1].lower()
        if ext not in ('.jpg', '.jpeg', '.png', '.gif', '.webp'):
            content_type = response.headers.get('content-type', '')
            if 'png' in content_type:
                ext = '.png'
            elif 'gif' in content_type:
                ext = '.gif'
            else:
                ext = '.jpg'  # default extension

        # Full save path: sanitized base name + a guaranteed extension.
        full_path = os.path.join(save_path, base + ext)

        # Write the image bytes to disk.
        with open(full_path, 'wb') as f:
            f.write(response.content)

        return {"url": item['cover_url'], "status": "success", "path": full_path}

    except Exception as e:
        # .get() so a missing 'cover_url' key can't raise again here.
        return {"url": item.get('cover_url'), "status": "failed", "error": str(e)}