import asyncio
import aiohttp
from bs4 import BeautifulSoup
import json
# Use tqdm.asyncio's tqdm: it adds async-aware helpers such as tqdm.gather
from tqdm.asyncio import tqdm
import functools
import logging
import sys

# --- Logger configuration ---
# Console shows INFO and above; the log file records everything (DEBUG+).
logger = logging.getLogger('NovelScraper')
logger.setLevel(logging.DEBUG)

formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)

file_handler = logging.FileHandler('novel_scraper_async.log', encoding='utf-8')
file_handler.setLevel(logging.DEBUG)

# Both handlers share one formatter and attach to the same named logger.
for _handler in (console_handler, file_handler):
    _handler.setFormatter(formatter)
    logger.addHandler(_handler)

def async_retry(max_attempts=3, delay=1):
    """Decorator factory: retry an async callable on transient failure.

    A call is retried when it either returns ``None`` (the fetch helpers in
    this module use ``None`` as their failure sentinel) or raises a transient
    network error. ``asyncio.TimeoutError`` is caught explicitly because it
    is what aiohttp raises when a request exceeds its timeout and it is NOT
    a subclass of ``aiohttp.ClientError`` — without it a single timeout
    would propagate and abort the surrounding ``asyncio.gather``.

    Args:
        max_attempts: total number of attempts before giving up.
        delay: seconds to sleep between consecutive attempts.

    Returns:
        The wrapped coroutine's result, or ``None`` once all attempts fail.
    """
    def decorator(func):
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            # Resolve the shared logger lazily; same singleton as the
            # module-level `logger`, but keeps the decorator self-contained.
            log = logging.getLogger('NovelScraper')
            for attempt in range(1, max_attempts + 1):
                try:
                    result = await func(*args, **kwargs)
                    if result is not None:
                        return result
                    log.warning(f"尝试 {attempt} 获取 {func.__name__} 失败：返回 None。正在重试...")
                except (aiohttp.ClientError, asyncio.TimeoutError) as e:
                    log.warning(f"尝试 {attempt} 获取 {func.__name__} 失败：{e}")

                # Sleep only when another attempt remains.
                if attempt < max_attempts:
                    log.info(f"正在等待 {delay} 秒后重试...")
                    # The wait itself must be async so other tasks keep running.
                    await asyncio.sleep(delay)
            log.error(f"经过 {max_attempts} 次尝试后，{func.__name__} 最终失败。")
            return None
        return wrapper
    return decorator

class NovelScraper:
    """Asynchronously scrapes a novel (metadata + chapters) from a
    biquge-style site and accumulates the result in ``self.novel_data``.

    Typical use: ``await scraper.scrape_novel(index_url)`` then
    ``scraper.save_to_json(path)``.
    """

    def __init__(self, base_url, headers=None, timesleep=0):
        """
        Args:
            base_url: site root; normalized to end with exactly one '/'.
            headers: optional HTTP headers; defaults to a desktop Chrome UA.
            timesleep: optional per-request pause in seconds (politeness
                throttle); 0 disables it.
        """
        self.base_url = base_url.rstrip('/') + '/'
        self.headers = headers if headers else {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'
        }
        self.timesleep = timesleep
        # Aggregated result; "content" holds {chapter_name: text} dicts in
        # chapter order.
        self.novel_data = {
            "author": None,
            "desc": None,
            "index_href": None,
            "content": []
        }
        self.chapters_parsed_count = 0
        self.logger = logging.getLogger('NovelScraper')

    @async_retry(max_attempts=5, delay=1)
    async def _fetch_html_content(self, session, url):
        """Fetch ``url`` and return its decoded body, or None on failure.

        Returning None (instead of raising) is the failure contract that
        ``async_retry`` relies on for its retry decision.
        """
        self.logger.debug(f"正在尝试获取 URL: {url}")
        try:
            # The session is owned by scrape_novel's ``async with`` block;
            # this method only borrows it.
            async with session.get(url, headers=self.headers, timeout=600) as response:
                response.raise_for_status()
                if self.timesleep > 0:
                    # Optional politeness delay between requests.
                    await asyncio.sleep(self.timesleep)
                self.logger.debug(f"成功获取 URL: {url}")
                return await response.text()
        except aiohttp.ClientResponseError as e:
            self.logger.error(f"HTTP错误: {url} 返回状态码 {e.status}")
            return None
        except asyncio.TimeoutError:
            # Raised when the request exceeds the timeout; NOT a subclass of
            # aiohttp.ClientError, so it must be handled here explicitly.
            self.logger.error(f"请求超时: {url}")
            return None
        except aiohttp.ClientError as e:
            self.logger.error(f"请求失败: {url} - {e}")
            return None

    def _parse_chapter_list(self, html_content):
        """Parse the index page into {position: (chapter_name, absolute_url)}.

        The chapter links live in the page's second <dl>; the "展开全部章节"
        ("expand all chapters") pseudo-link is skipped.
        """
        soup = BeautifulSoup(html_content, 'html.parser')
        chap_dict = {}
        try:
            chapter_dl = soup.find_all('dl')[1]
            for i, tag in enumerate(chapter_dl.find_all('dd')):
                link = tag.a
                if link is None:
                    continue
                # ``.string`` is None when the <a> contains nested tags; the
                # original ``'展开全部章节' not in tag.a.string`` raised an
                # uncaught TypeError in that case.
                title = link.string
                if not title or '展开全部章节' in title:
                    continue
                chap_name = title.strip()
                href = self.base_url + link['href'].lstrip('/')
                chap_dict[i] = (chap_name, href)
        except IndexError:
            self.logger.warning("未找到预期的章节列表结构。")
        except AttributeError:
            self.logger.warning("在解析章节列表时出现属性错误。")
        return chap_dict

    def _extract_novel_metadata(self, html_content, index_href):
        """Extract author/description from the index page into novel_data.

        Missing elements are tolerated: fields stay None and a warning is
        logged.
        """
        soup = BeautifulSoup(html_content, 'html.parser')
        author, desc = None, None
        try:
            author_tag = soup.find("div", {"class": 'small'})
            if author_tag:
                author_span = author_tag.find('span')
                author = author_span.string

            desc_tag = soup.find("div", {"class": "intro"})
            if desc_tag:
                desc = desc_tag.find('dd').get_text(strip=True)
        except (AttributeError, IndexError):
            self.logger.warning("未找到预期的元数据元素。")

        self.novel_data["author"] = author
        self.novel_data["desc"] = desc
        self.novel_data["index_href"] = index_href
        self.logger.info(f"成功提取小说元数据：作者={author}, 描述={desc[:30] if desc else 'N/A'}...")

    async def _download_chapter(self, session, chap_index, chap_name, chap_href):
        """Download and parse a single chapter.

        Returns:
            ``{'index': chap_index, 'data': {chap_name: content}}`` on
            success, or None when the fetch ultimately failed (so the caller
            can filter it out while preserving the other chapters).
        """
        chapter_html = await self._fetch_html_content(session, chap_href)
        if chapter_html:
            content = self._parse_chapter_content(chapter_html)
            self.logger.info(f"成功下载章节：'{chap_name}'")
            return {'index': chap_index, 'data': {chap_name: content}}
        else:
            self.logger.warning(f"由于获取失败，跳过章节 '{chap_name}'。")
            return None

    def _parse_chapter_content(self, html_content):
        """Return the chapter body text from ``id='chaptercontent'``.

        Returns an empty string when the element is missing.
        """
        soup = BeautifulSoup(html_content, 'html.parser')
        content = ""
        try:
            content_div = soup.find(id='chaptercontent')
            if content_div:
                content = content_div.get_text(strip=True, separator='\n')
        except AttributeError:
            self.logger.warning("未找到章节内容 div。")
        return content

    async def scrape_novel(self, chap_url, max_chapters=20):
        """Scrape metadata and up to ``max_chapters`` chapters from the
        index page at ``chap_url``.

        Returns:
            True on success (results stored in ``self.novel_data``), False
            when the index page or chapter list could not be obtained.
        """
        self.logger.info(f"正在从 {chap_url} 获取小说目录页...")

        # One ClientSession owns the connection pool; every request made by
        # the helpers must happen inside its lifetime.
        async with aiohttp.ClientSession() as session:
            main_page_html = await self._fetch_html_content(session, chap_url)
            if not main_page_html:
                self.logger.error("获取主目录页失败。程序退出。")
                return False

            self._extract_novel_metadata(main_page_html, chap_url)
            chap_dict = self._parse_chapter_list(main_page_html)

            if not chap_dict:
                self.logger.error("未找到任何章节。程序退出。")
                return False

            self.logger.info(f"共找到 {len(chap_dict)} 个章节，开始下载最多 {max_chapters} 个章节...")

            chapters_to_scrape = [(i, chap_dict[i][0], chap_dict[i][1]) for i in sorted(chap_dict.keys())][:max_chapters]

            tasks = [self._download_chapter(session, index, name, href) for index, name, href in chapters_to_scrape]

            # tqdm.asyncio's gather wraps asyncio.gather with a progress bar
            # and is awaited like the original.
            results = await tqdm.gather(*tasks, total=len(tasks), desc="下载章节")

            # Drop chapters whose download ultimately failed.
            results = [r for r in results if r is not None]

            # gather preserves task order, but sort defensively by the
            # chapter index carried in each result.
            sorted_results = sorted(results, key=lambda x: x['index'])

            self.novel_data["content"] = [item['data'] for item in sorted_results]
            self.chapters_parsed_count = len(sorted_results)

            self.logger.info(f"下载完成，共成功下载 {self.chapters_parsed_count} 个章节。")
            return True

    def save_to_json(self, filename):
        """Serialize ``self.novel_data`` to ``filename`` as pretty-printed,
        non-ASCII-preserving JSON. IO errors are logged, not raised."""
        try:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(self.novel_data, f, ensure_ascii=False, indent=4)
            # Fixed: the f-strings previously contained the literal
            # "(unknown)" instead of the {filename} placeholder.
            self.logger.info(f"小说数据已成功保存到 {filename}")
        except IOError as e:
            self.logger.error(f"保存文件 {filename} 失败: {e}")

# --- Usage example ---
async def main():
    """Entry point: scrape up to 50 chapters of one novel and dump to JSON."""
    output_filename = "寒霜千年_async.json"

    base_site_url = "https://www.c3719.lol/"
    chapter_list_url = base_site_url + "book/260609/"

    # No max_workers here: concurrency comes from the coroutine gather.
    scraper = NovelScraper(base_url=base_site_url)

    if await scraper.scrape_novel(chap_url=chapter_list_url, max_chapters=50):
        scraper.save_to_json(output_filename)

if __name__ == '__main__':
    asyncio.run(main())