import os
import time
import re
from requests_html import HTMLSession

class NovelDownloader:
    """Download a serialized web novel from bookszw.com and save it as text.

    Starting from the URL of the first chapter page, the downloader follows
    the "下一页" (next page) / "下一章" (next chapter) links found on each
    page, strips pagination markers, duplicated titles and promo lines from
    the extracted text, and writes the cleaned chapters to ``output_file``.
    """

    def __init__(self, start_url, output_file='novel.txt', max_chapters=1000):
        """Create a downloader.

        Args:
            start_url: URL of the first chapter page to crawl.
            output_file: Path of the UTF-8 text file the novel is written to.
            max_chapters: Safety cap on the number of chapters downloaded,
                so a broken or cyclic "next" link chain cannot loop forever.
        """
        self.start_url = start_url
        self.output_file = output_file
        self.session = HTMLSession()
        # Used to resolve site-relative hrefs found on chapter pages.
        self.base_url = 'http://www.bookszw.com'
        # One entry per chapter; each entry is a list of lines (title first).
        self.novel_content = []
        self.max_chapters = max_chapters

    def get_page_content(self, url):
        """Fetch *url* and return its parsed HTML, retrying on failure.

        Returns the response's ``html`` attribute on success, or ``None``
        after three failed attempts.
        """
        max_retries = 3
        retry_count = 0

        while retry_count < max_retries:
            try:
                response = self.session.get(url, timeout=10)
                response.raise_for_status()
                # Let requests guess the encoding so Chinese text decodes
                # correctly (the site may not declare a charset).
                response.encoding = response.apparent_encoding
                return response.html
            except Exception as e:
                retry_count += 1
                print(f"获取页面 {url} 失败 (尝试 {retry_count}/{max_retries}): {e}")
                if retry_count < max_retries:
                    time.sleep(2)  # back off briefly before retrying

        return None

    def extract_chapter_title(self, html):
        """Extract the chapter title from a page, or a placeholder if absent.

        Newlines and pagination markers such as "(第1/2页)" are stripped;
        when the title matches the site's "number【title】" format, only that
        part is returned.
        """
        title_elem = html.find('h1.title', first=True)
        if title_elem:
            title = title_elem.text.strip()
            title = re.sub(r'[\n\r]+', '', title)
            # Drop pagination markers like "(第1/2页)".
            title = re.sub(r'\s*\(第\d+/\d+页\)', '', title)
            # Prefer the bare "number【title】" form when present.
            pure_title_match = re.search(r'(\d+【[^】]+】)', title)
            if pure_title_match:
                return pure_title_match.group(1)
            return title
        return "未命名章节"

    def extract_chapter_content(self, html):
        """Extract the chapter body as a list of cleaned paragraph strings.

        Script/<h1> elements are removed (best effort), blank lines are
        dropped, the duplicated title line is skipped, and any trailing
        "本章未完" / "下一页" continuation markers are stripped.
        """
        content_elem = html.find('div.content#content', first=True)
        if not content_elem:
            return []

        # Best-effort removal of noise elements.  NOTE(review): requests_html
        # elements may not expose decompose(); the hasattr guard means this
        # may be silently skipped — confirm against the library version used.
        for script in content_elem.find('script'):
            if hasattr(script, 'decompose'):
                script.decompose()
        for h1 in content_elem.find('h1'):
            if hasattr(h1, 'decompose'):
                h1.decompose()

        # The extracted text uses newlines where the page had <br> tags.
        text = content_elem.text
        lines = text.split('\n')

        # Drop blank lines and collapse runs of whitespace inside each line.
        paragraphs = []
        for line in lines:
            stripped_line = line.strip()
            if stripped_line:
                cleaned_line = re.sub(r'\s+', ' ', stripped_line)
                paragraphs.append(cleaned_line)

        # The first non-empty line repeats the chapter title — drop it.
        if paragraphs:
            paragraphs = paragraphs[1:]

        # Strip trailing "to be continued" markers from the end of the page.
        end_keywords = ['本章未完', '下一页', '(本章未完，请点击下一页继续阅读)', '请点击下一页继续阅读']
        if paragraphs:
            while any(keyword in paragraphs[-1] for keyword in end_keywords):
                paragraphs = paragraphs[:-1]
                if not paragraphs:
                    break

        return paragraphs

    def extract_next_page_url(self, html):
        """Return the absolute URL of the next page/chapter, or ``None``.

        Looks inside the page's navigation bar (``div.section-opt``) for a
        link whose text contains "下一页" (next page) or "下一章" (next
        chapter); site-relative hrefs are resolved against ``base_url``.
        """
        section_opt = html.find('div.section-opt', first=True)
        if not section_opt:
            return None

        next_links = section_opt.find('a')
        for link in next_links:
            if hasattr(link, 'text') and hasattr(link, 'attrs'):
                link_text = link.text.strip()
                if '下一页' in link_text or '下一章' in link_text:
                    href = link.attrs.get('href', '')
                    if href:
                        # Resolve site-relative links; pass absolute ones
                        # through unchanged.
                        if href.startswith('/'):
                            return self.base_url + href
                        elif href.startswith('http'):
                            return href

        return None

    def download_chapter(self, url):
        """Download one chapter, which may span several paginated pages.

        Returns ``(paragraphs, next_chapter_url)`` where ``paragraphs``
        starts with the chapter title and ``next_chapter_url`` is the URL of
        the following chapter, or ``None`` when there is no further link.
        """
        current_chapter_paragraphs = []
        current_url = url
        next_chapter_url = None

        while current_url:
            html = self.get_page_content(current_url)
            if not html:
                break

            # Extract the chapter title only once, on the first page.
            if not current_chapter_paragraphs:
                chapter_title = self.extract_chapter_title(html)
                current_chapter_paragraphs.append(chapter_title)

            paragraphs = self.extract_chapter_content(html)
            # Drop lines carrying a pagination marker — these are usually
            # repeats of the title injected on continuation pages.
            filtered_paragraphs = []
            for para in paragraphs:
                if re.search(r'\(第\d+/\d+页\)', para):
                    continue
                filtered_paragraphs.append(para)
            current_chapter_paragraphs.extend(filtered_paragraphs)

            next_page_url = self.extract_next_page_url(html)

            # Keep following the link only while it stays within this
            # chapter; otherwise hand it back as the next chapter's URL.
            if next_page_url and self._is_same_chapter_next_page(url, next_page_url):
                current_url = next_page_url
                # Be polite to the server between page requests.
                time.sleep(1)
            else:
                next_chapter_url = next_page_url
                break

        return current_chapter_paragraphs, next_chapter_url

    def _is_same_chapter_next_page(self, current_url, next_url):
        """Return True when *next_url* is a continuation page of *current_url*.

        The site paginates chapters as ``<id>.html``, ``<id>_2.html``, ... —
        so two URLs belong to the same chapter when they share the ``<id>``
        prefix of the final path component.
        """
        try:
            # Compare the final path components, e.g. "53266187_2".
            current_parts = current_url.split('/')[-1].split('.')[0].split('_')
            next_parts = next_url.split('/')[-1].split('.')[0].split('_')

            # First page ("<id>") followed by a numbered page ("<id>_2").
            if len(current_parts) == 1 and len(next_parts) > 1 and next_parts[1].isdigit():
                return True

            # Two numbered pages of the same chapter share the id prefix.
            if len(current_parts) > 1 and len(next_parts) > 1:
                return current_parts[0] == next_parts[0]
        except Exception as e:
            print(f"判断URL时出错: {e}")

        return False

    def download_novel(self):
        """Download every chapter, following "next chapter" links.

        Stops when no further link is found or ``max_chapters`` is reached.
        """
        print(f"开始下载小说，起始URL: {self.start_url}")

        current_url = self.start_url
        chapter_count = 0

        # max_chapters guards against a cyclic or broken link chain.
        while current_url and chapter_count < self.max_chapters:
            chapter_count += 1
            print(f"正在下载第{chapter_count}章...")

            chapter_content, current_url = self.download_chapter(current_url)

            # Keep the chapter only when it has a title plus at least one
            # paragraph of body text.
            if len(chapter_content) > 1:
                cleaned_content = self._clean_chapter_content(chapter_content)
                self.novel_content.append(cleaned_content)

            # Be polite to the server between chapter requests.
            if current_url:
                time.sleep(1)

        print(f"小说下载完成，共下载{chapter_count}章")

    def _clean_chapter_content(self, chapter_content):
        """Remove duplicated titles, promo lines and noise from a chapter.

        ``chapter_content`` is a list whose first element is the chapter
        title; the cleaned list keeps the same shape.
        """
        if not chapter_content:
            return []

        cleaned = []
        # Normalise the main title: drop any pagination marker and keep only
        # the bare "number【title】" form when present.
        main_title = re.sub(r'\s*\(第\d+/\d+页\)', '', chapter_content[0])
        pure_title_match = re.search(r'(\d+【[^】]+】)', main_title)
        if pure_title_match:
            main_title = pure_title_match.group(1)
        cleaned.append(main_title)

        seen_paragraphs = set()
        title_pattern = re.compile(r'^\d+【[^】]+】$')  # bare chapter-title lines

        # Clean the body, everything after the title.
        for para in chapter_content[1:]:
            # Skip lines carrying a pagination marker.
            if re.search(r'\(第\d+/\d+页\)', para):
                continue

            # Skip bare chapter-title lines repeated inside the body.
            if title_pattern.match(para):
                if para == cleaned[0]:
                    continue
                # Remember it, but do not emit it as body text.
                seen_paragraphs.add(para)
                continue

            # Skip author promo lines ("求月票" = vote requests, etc.).
            if '求月票' in para or '感谢大佬' in para:
                continue

            # Very short lines are usually ads or noise.
            if len(para) < 10:
                continue

            # Drop exact duplicate paragraphs.
            if para not in seen_paragraphs:
                seen_paragraphs.add(para)
                cleaned.append(para)

        return cleaned

    def save_to_file(self):
        """Write all downloaded chapters to ``output_file`` as UTF-8 text."""
        if not self.novel_content:
            print("没有下载到小说内容，无法保存")
            return

        try:
            # Make sure the output directory exists.
            output_dir = os.path.dirname(self.output_file)
            if output_dir and not os.path.exists(output_dir):
                os.makedirs(output_dir, exist_ok=True)

            with open(self.output_file, 'w', encoding='utf-8') as f:
                for i, chapter in enumerate(self.novel_content):
                    # Separate chapters with a dashed line (not before the
                    # first chapter).
                    if i > 0:
                        f.write("------------\n")

                    # Chapter body: title followed by its paragraphs.
                    for line in chapter:
                        f.write(f"{line}\n")

            print(f"小说已保存到: {os.path.abspath(self.output_file)}")
        except Exception as e:
            print(f"保存小说文件失败: {e}")

    def run(self):
        """Download the novel, save it, and always close the HTTP session."""
        try:
            self.download_novel()
            self.save_to_file()
        finally:
            # Closing is best-effort; never let it mask an earlier error.
            try:
                self.session.close()
            except Exception:
                pass

if __name__ == "__main__":
    # URL of the first chapter page to start the crawl from.
    first_chapter_url = 'http://www.bookszw.com/148/148765/53266187.html'
    NovelDownloader(first_chapter_url).run()