import asyncio
import random
import time
from urllib.parse import urlencode

import aiohttp


class ImageCrawler:
    """Asynchronous crawler for image search results on duitang.com.

    Pages through the site's AJAX search API with aiohttp and hands each
    discovered image URL to overridable persistence hooks
    (`check_image_exists` / `save_to_db`).
    """

    def __init__(self):
        # Browser-like headers: Referer and X-Requested-With are what the
        # site's AJAX endpoint expects from its own frontend.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
            'Accept': 'text/plain, */*; q=0.01',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Referer': 'https://www.duitang.com/search/?kw=%E8%85%BF&type=feed',
            'X-Requested-With': 'XMLHttpRequest'
        }
        self.base_url = "https://www.duitang.com/napi/blogv2/list/by_search/"

    def _get_search_url(self, keyword, after_id):
        """Build the paginated search URL for *keyword*.

        Fix: the original joined raw ``k=v`` pairs with ``&``, which breaks
        for keywords containing '&', '=', '#' or spaces and leaves non-ASCII
        keywords unencoded.  ``urlencode`` percent-encodes every value safely.

        :param keyword: search term (any string, will be percent-encoded)
        :param after_id: pagination cursor returned by the previous page
        :return: fully-encoded request URL
        """
        params = {
            'kw': keyword,
            'after_id': after_id,
            'type': 'feed',
            'include_fields': 'top_comments,is_root,source_link,item,buyable,root_id,status,like_count,like_id,sender,album,reply_count,favorite_blog_id',
            # Millisecond timestamp used as a cache-buster by the site's JS.
            '_': int(time.time() * 1000)
        }
        return f"{self.base_url}?{urlencode(params)}"

    async def check_image_exists(self, img_url: str) -> bool:
        """Return True if *img_url* is already stored.

        Stub: always False.  TODO: replace with your own logic,
        e.g. a database lookup.
        """
        return False

    async def save_to_db(self, title: str, img_url: str) -> bool:
        """Persist one image record; return True on success.

        Stub: only prints.  TODO: replace with real storage logic.
        """
        print(f"✅ 保存图片: {title} | {img_url}")
        return True

    async def crawl_page(self, session, keyword: str, after_id: str) -> tuple[int, str, bool]:
        """Crawl a single result page.

        :param session: an open ``aiohttp.ClientSession``
        :param keyword: search term
        :param after_id: pagination cursor ("0" for the first page)
        :return: ``(saved_count, next_after_id, has_more)`` — on any error the
            partial count and the unchanged cursor are returned with
            ``has_more=False`` so the caller stops gracefully.
        """
        downloaded = 0
        next_after_id = after_id
        has_more = False

        try:
            search_url = self._get_search_url(keyword, after_id)
            print(f"📥 正在请求 after_id={after_id}，URL: {search_url}")

            async with session.get(search_url) as response:
                # Non-200 responses are silently skipped; the defaults above
                # make the caller treat it as "no more pages".
                if response.status == 200:
                    data = await response.json()

                    # API convention: top-level status == 1 means success.
                    if data.get('status') == 1:
                        result_data = data.get('data', {})
                        object_list = result_data.get('object_list', [])
                        has_more = result_data.get('more', 0) == 1
                        next_after_id = result_data.get('after', after_id)

                        for item in object_list:
                            photo = item.get('photo', {})
                            img_url = photo.get('path')
                            title = item.get('msg', '无标题')

                            # Entries without an image URL are skipped.
                            if not img_url:
                                continue

                            if await self.check_image_exists(img_url):
                                print(f"⚠️ 图片已存在，跳过: {img_url}")
                                continue

                            print(f"🖼️ 发现新图片: {img_url}")
                            if await self.save_to_db(title, img_url):
                                downloaded += 1

                            # Small random pause between items to avoid
                            # hammering the server.
                            await asyncio.sleep(random.uniform(0.3, 0.7))

        except Exception as e:
            # Best-effort page fetch: log and return what we have so the
            # caller can decide whether to continue.
            print(f"❌ 爬取页面失败 after_id={after_id}: {str(e)}")

        return downloaded, next_after_id, has_more

    async def crawl_images(self, keyword="腿", max_pages=10):
        """Crawl up to *max_pages* pages of results for *keyword*.

        :param keyword: search term
        :param max_pages: hard page limit; crawling also stops early when the
            API reports no further pages
        :return: total number of images saved
        """
        total_downloaded = 0
        after_id = "0"  # the API's cursor for the first page

        try:
            async with aiohttp.ClientSession(headers=self.headers) as session:
                for page in range(max_pages):
                    downloaded, new_after_id, has_more = await self.crawl_page(session, keyword, after_id)
                    total_downloaded += downloaded
                    after_id = new_after_id

                    print(f"✅ 第 {page + 1} 页完成，累计下载 {total_downloaded} 张图片")

                    if not has_more:
                        print("🚫 没有更多页面了")
                        break

                    # Longer pause between pages than between items.
                    await asyncio.sleep(random.uniform(1, 2))

                print(f"🎉 总共保存了 {total_downloaded} 张新图片")

        except Exception as e:
            print(f"❌ 爬取失败: {str(e)}")

        return total_downloaded


# ✅ Script entry point
if __name__ == "__main__":
    crawler = ImageCrawler()
    # Bug fix: crawl_images is a coroutine function — calling it directly only
    # creates a coroutine object and never executes it ("RuntimeWarning:
    # coroutine ... was never awaited").  asyncio.run() drives it to completion.
    asyncio.run(crawler.crawl_images(keyword="腿", max_pages=10))