import os
import random
import time
from urllib.parse import urlencode

import requests
from bs4 import BeautifulSoup
from requests import Response


class SyncImageCrawler:
    """Synchronous crawler for duitang.com image atlases.

    Searches the duitang feed API for a keyword, opens each atlas page in
    the results, and downloads every image of the atlas into a folder named
    ``<timestamp>_<username>_<title>`` under ``save_dir``.
    """

    def __init__(self, save_dir="downloads"):
        """
        Args:
            save_dir: Root directory under which one folder per atlas is created.
        """
        # Browser-like headers; duitang's API expects an XHR-style request
        # with a search-page Referer.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36',
            'Accept': 'text/plain, */*; q=0.01',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Referer': 'https://www.duitang.com/search/?kw=%E8%85%BF&type=feed',
            'X-Requested-With': 'XMLHttpRequest'
        }
        self.base_url = "https://www.duitang.com/napi/blogv2/list/by_search/"
        self.save_dir = save_dir

    def _get_search_url(self, keyword, after_id):
        """Build the search-API URL for one page of results.

        Uses ``urlencode`` so non-ASCII keywords (the typical use case is a
        Chinese keyword) and reserved characters are percent-encoded; the
        previous manual '&'.join sent them raw.

        Args:
            keyword: Search term.
            after_id: Pagination cursor returned by the previous page ("0" for the first page).

        Returns:
            Fully encoded request URL.
        """
        params = {
            'kw': keyword,
            'after_id': after_id,
            'type': 'feed',
            'include_fields': 'top_comments,is_root,source_link,item,buyable,root_id,status,like_count,like_id,sender,album,reply_count,favorite_blog_id',
            '_': int(time.time() * 1000)  # cache-busting timestamp, ms
        }
        return f"{self.base_url}?{urlencode(params)}"

    def check_image_exists(self, img_url: str) -> bool:
        """Deduplication hook — return True to skip an already-seen URL.

        TODO: back this with a database, Redis, or an on-disk index.
        """
        return False

    def save_to_db(self, title: str, img_url: str) -> bool:
        """Persistence hook — record a downloaded image.

        TODO: replace with real database-save logic.
        """
        print(f"📥 保存记录: {title} | {img_url}")
        return True

    def download_image(self, img_url: str, folder_name: str, index: int):
        """Download one image into *folder_name* as a zero-padded name (e.g. 001.jpg).

        Non-200 responses are silently skipped; network errors are logged
        and swallowed so one bad image never aborts the atlas.
        """
        try:
            response = requests.get(img_url, headers=self.headers, timeout=10)
            if response.status_code == 200:
                os.makedirs(folder_name, exist_ok=True)
                # Derive the extension from the URL, dropping any query string
                # and falling back to .jpg for anything unrecognized.
                ext = os.path.splitext(img_url)[-1]
                if '?' in ext:
                    ext = ext.split('?')[0]
                ext = ext if ext in ['.jpg', '.jpeg', '.png'] else '.jpg'
                filename = os.path.join(folder_name, f"{index:03}{ext}")
                with open(filename, 'wb') as f:
                    f.write(response.content)
                # Bug fix: previously printed a literal placeholder instead of
                # the actual saved path.
                print(f"✅ 下载成功: {filename}")
        except Exception as e:
            print(f"❌ 下载失败 {img_url}: {e}")

    def crawl_page(self, keyword: str, after_id: str) -> tuple[int, str, bool]:
        """Fetch one search-result page and download every atlas on it.

        Returns:
            (downloaded, next_after_id, has_more): number of images saved,
            the pagination cursor for the next call, and whether the API
            reports further pages.
        """
        downloaded = 0
        next_after_id = after_id
        has_more = False

        url = self._get_search_url(keyword, after_id)
        print(f"\n📥 请求 URL: {url}")

        try:
            response = requests.get(url, headers=self.headers, timeout=10)
            if response.status_code == 200:
                data = response.json()

                # API convention: status == 1 means success.
                if data.get('status') == 1:
                    result_data = data.get('data', {})
                    object_list = result_data.get('object_list', [])
                    has_more = result_data.get('more', 0) == 1
                    next_after_id = result_data.get('after', after_id)
                    # One timestamp per page so this page's atlas folders sort together.
                    timestamp = str(int(time.time() * 1000))

                    for item in object_list:
                        sender = item.get('sender', {})
                        username = sender.get('username', 'unknown')
                        atlas_id = item.get('atlas_id')
                        if not atlas_id:
                            continue  # result is not an atlas; skip it
                        next_url = f'https://www.duitang.com/atlas/?id={atlas_id}'

                        try:
                            next_response: Response = requests.get(next_url, headers=self.headers, timeout=10)
                            if next_response.status_code == 200:
                                soup = BeautifulSoup(next_response.text, "html.parser")
                                title_tag = soup.find('div', class_='atlas-desc')
                                title = title_tag.text.strip() if title_tag else ''
                                # Sanitize the folder name: no newlines, no path separators.
                                folder_name = f"{timestamp}_{username}_{title}".replace("\n", "").replace("/", "_")
                                folder_path = os.path.join(self.save_dir, folder_name)

                                images_list = []
                                for li in soup.find_all("li", class_="atlas-item"):
                                    img_ = li.find('img')
                                    src = img_.get("src") if img_ else None
                                    # Guard against <img> tags without a src attribute
                                    # (previously raised AttributeError on None).
                                    if src:
                                        # Strip the thumbnail suffix to get the full-size URL.
                                        images_list.append(src.replace(".thumb.300_300_c", ""))

                                print(f"📂 图集: {folder_name}")
                                for i, image_url in enumerate(images_list):
                                    if not self.check_image_exists(image_url):
                                        self.download_image(image_url, folder_path, i)
                                        self.save_to_db(folder_name, image_url)
                                        downloaded += 1

                        except Exception as e:
                            print(f"❌ 图集页面出错: {e}")

                        # Polite random delay between atlas requests.
                        time.sleep(random.uniform(0.3, 0.6))
        except Exception as e:
            print(f"❌ 请求失败 after_id={after_id}: {e}")

        return downloaded, next_after_id, has_more

    def crawl_images(self, keyword="腿", max_pages=2):
        """Crawl up to *max_pages* result pages for *keyword*.

        Stops early when the API reports no more pages; sleeps between
        pages to avoid hammering the server.
        """
        total_downloaded = 0
        after_id = "0"

        for page in range(max_pages):
            downloaded, after_id, has_more = self.crawl_page(keyword, after_id)
            total_downloaded += downloaded
            print(f"✅ 第 {page + 1} 页完成，累计下载 {total_downloaded} 张")

            if not has_more:
                print("🚫 没有更多页面了")
                break

            time.sleep(random.uniform(1, 2))

        print(f"\n🎉 总共下载 {total_downloaded} 张图片")


if __name__ == "__main__":
    # Point the crawler at a download directory, then fetch up to 10 pages
    # of results for the given keyword.
    SyncImageCrawler(save_dir="download_images").crawl_images(keyword="御姐", max_pages=10)
