import asyncio
import json
import os
import uuid
from novel_parser import NovelParser
from urllib.parse import urlparse
# 数据文件路径
DATA_FILE = 'novels_data.json'

async def preload_next_chapter(novel_id):
    """Asynchronously preload the chapter(s) following a novel's newest cached one.

    Loads the novel record from DATA_FILE, fetches the next chapter via
    NovelParser starting from the newest cached chapter's forward link,
    appends the result to the novel's chapter list, and rewrites DATA_FILE.

    Args:
        novel_id: Identifier of the novel record inside DATA_FILE.
    """
    # Load the full data set; the whole file is rewritten on success.
    with open(DATA_FILE, 'r', encoding='utf-8') as f:
        novels = json.load(f)

    # Locate the target novel record.
    novel = next((n for n in novels if n['id'] == novel_id), None)
    if not novel:
        print(f"未找到小说 ID: {novel_id}")
        return

    # Nothing to extend if no chapters were ever parsed.
    if not novel['chapters']:
        print(f"小说 {novel_id} 没有章节数据")
        return

    # Newest cached chapter; new chapters are numbered after it.
    last_chapter = max(novel['chapters'], key=lambda c: c['id'])

    # NOTE(review): this skips preloading when the reader is within 5
    # chapters of the end of the cache, which looks inverted for a
    # read-ahead buffer — confirm the intended direction of this check.
    if novel['current_chapter'] + 5 > last_chapter['id']:
        return

    # Nothing to fetch when the newest chapter has no forward link.
    if not last_chapter.get('next_url'):
        return

    parsed_url = urlparse(novel['url'])
    baseurl = f'{parsed_url.scheme}://{parsed_url.netloc}'
    parser = NovelParser(headless=True, baseurl=baseurl)
    try:
        # Fetch one chapter ahead of the newest cached one.
        preloaded_chapters = await parser.preload_next_chapters(last_chapter['next_url'], 1)

        if preloaded_chapters:
            # Number new chapters consecutively after the cached maximum.
            # (Previously every preloaded chapter reused last_chapter['id'] + 1,
            # which produced duplicate ids when more than one was fetched.)
            next_id = last_chapter['id']
            for preloaded in preloaded_chapters:
                next_id += 1
                content_id = save_chapter_content(novel_id, preloaded.get('content', '暂无内容'))
                new_chapter = {
                    'id': next_id,
                    'title': preloaded.get('title', f'第{next_id}章'),
                    'content_id': content_id,
                    'next_url': preloaded.get('next_url', '')
                }

                # Append to the chapter list and keep the count in sync.
                novel['chapters'].append(new_chapter)
                novel['total_chapters'] = len(novel['chapters'])

                print(f"为小说 {novel['title']} 预加载了章节: {new_chapter['title']}")

        # Persist the updated data set.
        with open(DATA_FILE, 'w', encoding='utf-8') as f:
            json.dump(novels, f, ensure_ascii=False, indent=2)
    except Exception as e:
        print(f"预加载章节时出错: {e}")
    finally:
        # Always release the browser, even on failure.
        await parser.close_browser()

async def process_novel(title, url, novel_id, chapters):
    """Asynchronously parse a novel's first chapter, preload a read-ahead
    batch, store each chapter's content, and update the novel record.

    Args:
        title: Display title of the novel (used only in log messages).
        url: URL of the chapter to parse first.
        novel_id: Identifier of the novel record inside DATA_FILE.
        chapters: Unused by this function; kept for interface compatibility.
    """
    # urlparse is already imported at module level; the previous
    # redundant function-local import was removed.
    parsed_url = urlparse(url)
    baseurl = f'{parsed_url.scheme}://{parsed_url.netloc}'
    parser = NovelParser(headless=True, baseurl=baseurl)
    try:
        # Parse the entry chapter.
        chapter_data = await parser.parse_novel_chapter(url)

        # Preload further chapters if the entry chapter links forward
        # (the count of 5 is the read-ahead depth; adjust as needed).
        preloaded_chapters = []
        if chapter_data.get('next_url'):
            preloaded_chapters = await parser.preload_next_chapters(chapter_data['next_url'], 5)

        # Persist the entry chapter's content and build its record (id 1).
        content_id = save_chapter_content(novel_id, chapter_data.get('content', '暂无内容'))
        current_chapter = {
            'id': 1,
            'title': chapter_data.get('title', '第一章'),
            'content_id': content_id,
            'next_url': chapter_data.get('next_url', '')
        }

        # Persist the preloaded chapters; ids continue from 2.
        new_chapters = [current_chapter]
        for i, preloaded in enumerate(preloaded_chapters):
            content_id = save_chapter_content(novel_id, preloaded.get('content', '暂无内容'))
            new_chapters.append({
                'id': i + 2,
                'title': preloaded.get('title', f'第{i+2}章'),
                'content_id': content_id,
                'next_url': preloaded.get('next_url', '')
            })

        # Replace the novel's chapter list in the data file.
        update_novel_data(novel_id, new_chapters)
        print(f"小说 {title} 解析完成，共 {len(new_chapters)} 章")
    except Exception as e:
        print(f"处理小说 {title} 时出错: {e}")
    finally:
        # Always release the browser, even on failure.
        await parser.close_browser()

def save_chapter_content(novel_id, content):
    """Write a chapter's text to its own file under content/<novel_id>/.

    Creates the per-novel directory on demand and stores the content in a
    file named by a freshly generated UUID4.

    Args:
        novel_id: Novel identifier; becomes the per-novel directory name.
        content: Chapter text to persist.

    Returns:
        The generated content id (UUID4 string) on success, or None when
        the write fails.
    """
    content_id = str(uuid.uuid4())
    # Build the path portably instead of concatenating with '/'.
    content_dir = os.path.join('content', novel_id)
    os.makedirs(content_dir, exist_ok=True)
    file_path = os.path.join(content_dir, content_id)
    try:
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(content)
        return content_id
    except Exception as e:
        print(f"保存章节内容失败: {e}")
        return None

def update_novel_data(novel_id, chapters):
    """Replace the chapter list of one novel record in DATA_FILE.

    Args:
        novel_id: Identifier of the novel record to update.
        chapters: New chapter list to store on the record.
    """
    try:
        # Read the entire data set.
        with open(DATA_FILE, 'r', encoding='utf-8') as fp:
            all_novels = json.load(fp)

        # Find the first matching record and overwrite its chapter data.
        # (The file is rewritten even when no record matches, exactly as
        # before — the data is simply unchanged in that case.)
        target = next((n for n in all_novels if n['id'] == novel_id), None)
        if target is not None:
            target['chapters'] = chapters
            target['total_chapters'] = len(chapters)

        # Write the data set back.
        with open(DATA_FILE, 'w', encoding='utf-8') as fp:
            json.dump(all_novels, fp, ensure_ascii=False, indent=2)
    except Exception as e:
        print(f"更新小说数据失败: {e}")