from main import ThreeJSDocsScraper
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import json
import os
import time
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed

def extract_categories_from_html(html_content, base_url):
    """Parse the docs table-of-contents HTML into a category tree.

    The expected markup is a flat sequence of ``<h2>`` main-category
    headings, each followed by ``<div>`` blocks containing an ``<h3>``
    sub-category heading plus that sub-category's ``<a>`` links.

    Args:
        html_content: HTML fragment (or document) with the TOC markup.
        base_url: Base URL used to resolve each link's relative href.

    Returns:
        A list of ``{'name': ..., 'subcategories': [{'name': ...,
        'links': [{'url', 'title', 'href'}, ...]}, ...]}`` dicts, with
        empty sub-categories and empty main categories removed.
    """
    soup = BeautifulSoup(html_content, 'html.parser')

    # Structured storage for every category and link found.
    structure = []

    # The TOC is usually wrapped in one root <div>; descend into it so the
    # h2/div sequence shows up as direct children we can walk in order.
    if soup.name != 'div':
        content_div = soup.find('div')
        if content_div:
            soup = content_div

    current_main = None
    current_sub = None

    for element in soup.children:
        # Skip bare text nodes (whitespace between tags).
        if element.name is None:
            continue

        if element.name == 'h2':
            # Main category heading.
            current_main = {
                'name': element.text.strip(),
                'subcategories': []
            }
            structure.append(current_main)
            current_sub = None

        elif element.name == 'div' and element.find('h3'):
            # Sub-category block: an <h3> title followed by its links.
            h3 = element.find('h3')
            current_sub = {
                'name': h3.text.strip(),
                'links': []
            }

            if current_main:
                current_main['subcategories'].append(current_sub)

            # Collect every link inside this div, resolving relative hrefs
            # against base_url.
            for a in element.find_all('a', href=True):
                href = a['href']
                current_sub['links'].append({
                    'url': urljoin(base_url, href),
                    'title': a.text.strip(),
                    'href': href
                })

    # Clean empty categories. Drop empty sub-categories FIRST, then empty
    # main categories, so a main category whose sub-categories all turned
    # out empty is removed as well (the original filter order could leave
    # such hollow main categories behind).
    for main in structure:
        main['subcategories'] = [sub for sub in main['subcategories'] if sub['links']]
    structure = [main for main in structure if main['subcategories']]

    return structure

def create_category_map(structure):
    """Build a mapping from document URL to its category labels.

    Args:
        structure: Category tree as returned by
            ``extract_categories_from_html``.

    Returns:
        Dict mapping each link's absolute URL to
        ``{'main_category', 'sub_category', 'full_category'}``, where
        ``full_category`` is ``"<main> / <sub>"``.
    """
    category_map = {}

    for main_category in structure:
        main_name = main_category['name']

        for sub_category in main_category['subcategories']:
            sub_name = sub_category['name']
            full_category = f"{main_name} / {sub_name}"

            # Key on the absolute URL; the relative href is not needed here
            # (the original bound it to an unused local).
            for link in sub_category['links']:
                category_map[link['url']] = {
                    'main_category': main_name,
                    'sub_category': sub_name,
                    'full_category': full_category
                }

    return category_map

def scrape_by_structure(html_content, base_url="http://www.yanhuangxueyuan.com/threejs/docs/", output_dir="threejs_docs_structured", max_workers=3):
    """Scrape every document listed in the HTML structure and save results.

    Parses ``html_content`` into a category tree, downloads each linked page
    with ``ThreeJSDocsScraper`` in batches of threaded workers, and writes
    one JSON file per document under ``output_dir/<main>/<sub>/`` plus the
    aggregate files ``structure.json``, ``all_docs.json``,
    ``scrape_stats.json`` and a ``README.md`` summary.

    Args:
        html_content: HTML fragment containing the docs table of contents.
        base_url: Base URL used to resolve relative hrefs.
        output_dir: Root directory for all output files.
        max_workers: Thread-pool size used for concurrent downloads.

    Returns:
        Tuple ``(all_docs, structure)``: the successfully scraped document
        dicts and the parsed category tree.
    """
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
    logger = logging.getLogger('ThreeJSStructureScraper')

    # Parse the category tree out of the HTML table of contents.
    logger.info("正在解析文档结构...")
    structure = extract_categories_from_html(html_content, base_url)
    logger.info(f"解析完成，找到 {len(structure)} 个主分类")

    scraper = ThreeJSDocsScraper(base_url=base_url, max_workers=max_workers)

    os.makedirs(output_dir, exist_ok=True)

    # Persist the parsed tree so later runs/inspection don't need the HTML.
    with open(os.path.join(output_dir, "structure.json"), 'w', encoding='utf-8') as f:
        json.dump(structure, f, ensure_ascii=False, indent=2)

    # Flatten the tree into one list of doc descriptors and pre-create the
    # per-category output directories. (The original also built a
    # category_map here that was never read; it has been dropped.)
    all_links = []

    for main_category in structure:
        main_name = main_category['name']

        for sub_category in main_category['subcategories']:
            sub_name = sub_category['name']
            full_category = f"{main_name}/{sub_name}"

            category_dir = os.path.join(output_dir, main_name.replace('/', '_'), sub_name.replace('/', '_'))
            os.makedirs(category_dir, exist_ok=True)

            for link in sub_category['links']:
                all_links.append({
                    'url': link['url'],
                    'title': link['title'],
                    'category': full_category,
                    'main_category': main_name,
                    'sub_category': sub_name,
                })

    print(f"总共找到 {len(all_links)} 个文档链接")

    all_docs = []
    success_count = 0
    failed_urls = []

    # Process in batches of 30 links to avoid hammering the server.
    batch_size = 30
    total_batches = (len(all_links) + batch_size - 1) // batch_size

    logger.info(f"开始爬取文档，总共 {len(all_links)} 个链接，分为 {total_batches} 批处理")

    for i in range(0, len(all_links), batch_size):
        batch = all_links[i:i+batch_size]
        current_batch = i//batch_size + 1
        logger.info(f"处理第 {current_batch}/{total_batches} 批，共 {len(batch)} 个链接")

        batch_start_time = time.time()
        batch_success = 0

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_doc = {executor.submit(scraper.scrape_single_doc, doc): doc for doc in batch}

            for future in as_completed(future_to_doc):
                doc_info = future_to_doc[future]
                try:
                    doc_data = future.result()
                    if doc_data:
                        # Attach category metadata to the scraped payload.
                        doc_data['main_category'] = doc_info['main_category']
                        doc_data['sub_category'] = doc_info['sub_category']
                        doc_data['full_category'] = doc_info['category']

                        # Sanitize path components for the filesystem.
                        safe_main_cat = doc_info['main_category'].replace('/', '_').replace('\\', '_')
                        safe_sub_cat = doc_info['sub_category'].replace('/', '_').replace('\\', '_')
                        safe_title = doc_info['title'].replace('/', '_').replace('\\', '_')
                        safe_title = "".join([c for c in safe_title if c.isalnum() or c in ['_', '-', ' ']])

                        # Save under the matching category directory.
                        category_dir = os.path.join(output_dir, safe_main_cat, safe_sub_cat)
                        os.makedirs(category_dir, exist_ok=True)

                        file_path = os.path.join(category_dir, safe_title + '.json')
                        with open(file_path, 'w', encoding='utf-8') as f:
                            json.dump(doc_data, f, ensure_ascii=False, indent=2)

                        all_docs.append(doc_data)
                        success_count += 1
                        batch_success += 1
                        logger.info(f"成功: {doc_info['title']} ({doc_info['category']})")
                    else:
                        # An empty/falsy result previously vanished silently
                        # (counted neither as success nor failure); record it
                        # so the stats add up.
                        failed_urls.append({
                            'url': doc_info['url'],
                            'title': doc_info['title'],
                            'category': doc_info['category'],
                            'error': 'empty result'
                        })
                except Exception as e:
                    logger.error(f"处理失败: {doc_info['url']} - {str(e)}")
                    failed_urls.append({
                        'url': doc_info['url'],
                        'title': doc_info['title'],
                        'category': doc_info['category'],
                        'error': str(e)
                    })

        batch_time = time.time() - batch_start_time
        logger.info(f"批次 {current_batch}/{total_batches} 完成，成功: {batch_success}/{len(batch)}，耗时: {batch_time:.1f}秒")

        # Pause between batches to avoid overly frequent requests; the pause
        # scales with batch duration but is capped at 5 seconds.
        if i + batch_size < len(all_links):
            pause_time = min(5, max(1, batch_time * 0.2))
            logger.info(f"暂停 {pause_time:.1f} 秒后处理下一批...")
            time.sleep(pause_time)

    # Save the combined document dump.
    with open(os.path.join(output_dir, "all_docs.json"), 'w', encoding='utf-8') as f:
        json.dump(all_docs, f, ensure_ascii=False, indent=2)

    # Per-category success counts.
    category_stats = {}
    for doc in all_docs:
        full_cat = doc['full_category']
        category_stats[full_cat] = category_stats.get(full_cat, 0) + 1

    stats = {
        'total_links': len(all_links),
        'success_count': success_count,
        'failed_count': len(failed_urls),
        'failed_urls': failed_urls,
        'category_stats': category_stats,
        'scrape_time': time.strftime('%Y-%m-%d %H:%M:%S')
    }

    with open(os.path.join(output_dir, "scrape_stats.json"), 'w', encoding='utf-8') as f:
        json.dump(stats, f, ensure_ascii=False, indent=2)

    # Guard against division by zero when no links were found (the original
    # raised ZeroDivisionError on an empty structure).
    success_rate = success_count / len(all_links) * 100 if all_links else 0.0

    # Write a README.md summarizing the scrape results.
    readme_content = f"""# Three.js 文档爬取结果

## 爬取统计
- 总链接数: {len(all_links)}
- 成功获取: {success_count} ({success_rate:.1f}%)
- 失败数量: {len(failed_urls)}
- 爬取时间: {stats['scrape_time']}

## 分类统计
"""

    for category, count in sorted(category_stats.items(), key=lambda x: x[1], reverse=True):
        readme_content += f"- {category}: {count} 个文档\n"

    # List up to the first 10 failed links, if any.
    if failed_urls:
        readme_content += "\n## 失败的链接 (前10个)\n"
        for i, url_info in enumerate(failed_urls[:10]):
            readme_content += f"{i+1}. [{url_info['title']}]({url_info['url']}) - {url_info.get('error', 'Unknown error')}\n"

    with open(os.path.join(output_dir, "README.md"), 'w', encoding='utf-8') as f:
        f.write(readme_content)

    logger.info(f"爬取完成! 总共获取 {success_count}/{len(all_links)} 个文档 ({success_rate:.1f}%)")
    logger.info(f"失败的URL数量: {len(failed_urls)} 个")

    # Log the per-category totals, largest first.
    logger.info("\n各分类文档数量:")
    for category, count in sorted(category_stats.items(), key=lambda x: x[1], reverse=True):
        logger.info(f"  {category}: {count}")

    return all_docs, structure

if __name__ == "__main__":
    # Entry point: load the TOC HTML from disk when available, otherwise
    # fall back to the bundled inline snapshot and cache it to disk.
    html_file = "threejs_structure.html"
    if os.path.exists(html_file):
        with open(html_file, 'r', encoding='utf-8') as f:
            html_content = f.read()
    else:
        # No structure file yet: use the inline HTML content below.
        print("创建HTML结构文件...")
        html_content = """
        <div><h2>手册</h2><div><h3>起步</h3><ul><li><a href="manual/zh/introduction/Installation.html" target="viewer">安装</a></li><li><a href="manual/zh/introduction/Creating-a-scene.html" target="viewer">创建一个场景</a></li><li><a href="manual/zh/introduction/WebGL-compatibility-check.html" target="viewer">WebGL兼容性检查</a></li><li><a href="manual/zh/introduction/Drawing-lines.html" target="viewer">画线</a></li><li><a href="manual/zh/introduction/Creating-text.html" target="viewer">创建文字</a></li><li><a href="manual/zh/introduction/Loading-3D-models.html" target="viewer">载入3D模型</a></li><li><a href="manual/zh/introduction/FAQ.html" target="viewer">常见问题</a></li><li><a href="manual/zh/introduction/Useful-links.html" target="viewer">一些有用的链接</a></li></ul></div><div><h3>进阶</h3><ul><li><a href="manual/zh/introduction/How-to-update-things.html" target="viewer">如何更新场景</a></li><li><a href="manual/zh/introduction/How-to-dispose-of-objects.html" target="viewer">如何废置对象</a></li><li><a href="manual/zh/introduction/How-to-create-VR-content.html" target="viewer">如何创建VR内容</a></li><li><a href="manual/zh/introduction/How-to-use-post-processing.html" target="viewer">如何使用后期处理</a></li><li><a href="manual/zh/introduction/Matrix-transformations.html" target="viewer">矩阵变换</a></li><li><a href="manual/zh/introduction/Animation-system.html" target="viewer">动画系统</a></li></ul></div><h2>参考</h2><div><h3>动画</h3><ul><li><a href="api/zh/animation/AnimationAction.html" target="viewer" class="selected">AnimationAction</a></li><li><a href="api/zh/animation/AnimationClip.html" target="viewer" class="">AnimationClip</a></li><li><a href="api/zh/animation/AnimationMixer.html" target="viewer">AnimationMixer</a></li><li><a href="api/zh/animation/AnimationObjectGroup.html" target="viewer">AnimationObjectGroup</a></li><li><a href="api/zh/animation/AnimationUtils.html" target="viewer">AnimationUtils</a></li><li><a href="api/zh/animation/KeyframeTrack.html" target="viewer">KeyframeTrack</a></li><li><a 
href="api/zh/animation/PropertyBinding.html" target="viewer">PropertyBinding</a></li><li><a href="api/zh/animation/PropertyMixer.html" target="viewer">PropertyMixer</a></li></ul></div></div>
        """
        # Cache the inline HTML to a file so the next run can read it.
        with open(html_file, 'w', encoding='utf-8') as f:
            f.write(html_content)
    
    # Run the structure-driven scraper.
    base_url = "http://www.yanhuangxueyuan.com/threejs/docs/"
    scrape_by_structure(html_content, base_url)
