#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
分析爬取内容的脚本
"""

import os
import sqlite3
import json
from collections import Counter, defaultdict
from datetime import datetime

def analyze_content(db_path="./output/index.db",
                    output_file="monitoring/content_analysis.json",
                    output_dir="./output"):
    """Analyze crawled content and write a JSON report.

    Reads rows from the ``resources`` table in the SQLite index, aggregates
    type/category/URL-path distributions, scans ``output_dir`` for file-type
    counts and total size, saves everything to ``output_file``, and prints a
    short summary to stdout.

    Args:
        db_path: Path to the SQLite index database.
        output_file: Path of the JSON report to write.
        output_dir: Crawler output directory to scan for file statistics.
            Defaults to "./output", matching the previously hard-coded path.
    """
    try:
        conn = sqlite3.connect(db_path)
        try:
            cursor = conn.cursor()

            # Resource type distribution.
            cursor.execute("SELECT type, COUNT(*) FROM resources GROUP BY type")
            type_counts = dict(cursor.fetchall())

            # Resource category distribution (empty categories excluded).
            cursor.execute(
                "SELECT category, COUNT(*) FROM resources WHERE category != '' GROUP BY category"
            )
            category_counts = dict(cursor.fetchall())

            # URL path distribution: count the first path segment after the host.
            cursor.execute("SELECT url FROM resources")
            urls = [row[0] for row in cursor.fetchall()]

            path_counts = Counter()
            for url in urls:
                parts = url.split('/')
                if len(parts) > 3:
                    # parts[3] is the first segment after "scheme://host";
                    # strip any query string before counting.
                    path_counts[parts[3].split('?')[0]] += 1

            # Top 5 categories by resource count.
            cursor.execute("""
                SELECT category, COUNT(*) as count 
                FROM resources 
                WHERE category != '' 
                GROUP BY category 
                ORDER BY count DESC 
                LIMIT 5
            """)
            top_categories = dict(cursor.fetchall())

            # Most recently crawled resource for each of the top categories.
            recent_by_category = {}
            for category in top_categories:
                cursor.execute("""
                    SELECT url, title, crawl_time 
                    FROM resources 
                    WHERE category = ? 
                    ORDER BY crawl_time DESC 
                    LIMIT 1
                """, (category,))
                row = cursor.fetchone()
                if row:
                    url, title, crawl_time = row
                    recent_by_category[category] = {
                        "url": url,
                        # Fall back to the last URL segment when title is empty.
                        "title": title or url.split('/')[-1],
                        "time": crawl_time,
                    }
        finally:
            # Close the connection even when a query fails
            # (it was leaked on the error path before).
            conn.close()

        # File extension distribution of everything under output_dir.
        file_types = defaultdict(int)
        for _dirpath, _subdirs, filenames in os.walk(output_dir):
            for filename in filenames:
                if '.' in filename:
                    file_types[filename.split('.')[-1].lower()] += 1

        # Compute the directory size once (was recomputed three times before).
        total_size = get_dir_size(output_dir)
        total_size_human = format_size(total_size)

        # Build the analysis report.
        report = {
            "timestamp": datetime.now().isoformat(),
            "resource_types": type_counts,
            "categories": category_counts,
            "path_distribution": dict(path_counts.most_common(10)),
            "file_types": dict(sorted(file_types.items(), key=lambda x: x[1], reverse=True)),
            "top_categories": top_categories,
            "recent_by_category": recent_by_category,
            "total_urls": len(urls),
            "output_size": total_size,
            "output_size_human": total_size_human,
        }

        # Save the report; guard against output_file having no directory part,
        # since os.makedirs("") raises even with exist_ok=True.
        report_dir = os.path.dirname(output_file)
        if report_dir:
            os.makedirs(report_dir, exist_ok=True)
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(report, f, indent=2, ensure_ascii=False)

        print(f"内容分析报告已保存到: {output_file}")

        # Print a human-readable summary.
        print("\n===== 内容分析摘要 =====")
        print(f"总资源数: {len(urls)}")
        print("\n资源类型分布:")
        for type_name, count in type_counts.items():
            print(f"  - {type_name}: {count}")

        print("\n前5个类别:")
        for category, count in top_categories.items():
            print(f"  - {category}: {count}")

        print("\n文件类型分布:")
        for ext, count in sorted(file_types.items(), key=lambda x: x[1], reverse=True)[:5]:
            print(f"  - .{ext}: {count}")

        print(f"\n输出目录大小: {total_size_human}")
        print("========================\n")

    except sqlite3.Error as e:
        print(f"数据库错误: {e}")
    except Exception as e:
        print(f"分析内容时出错: {e}")

def get_dir_size(path):
    """Return the total size, in bytes, of all regular files under *path*.

    The directory tree is walked recursively; symbolic links are skipped so
    their targets are not counted through the link.

    Args:
        path: Root directory to measure.

    Returns:
        int: Cumulative size of all non-symlink files, in bytes.
    """
    return sum(
        os.path.getsize(entry)
        for dirpath, _subdirs, names in os.walk(path)
        for entry in (os.path.join(dirpath, name) for name in names)
        if not os.path.islink(entry)
    )

def format_size(size_bytes):
    """Format a byte count as a human-readable string.

    Divides by 1024 until the value drops below 1024 or the largest unit
    (TB) is reached, then renders with two decimal places.

    Args:
        size_bytes: Size in bytes.

    Returns:
        str: Formatted size, e.g. "1.50 KB".
    """
    units = ('B', 'KB', 'MB', 'GB', 'TB')
    value = float(size_bytes)
    index = 0
    while value >= 1024.0 and index < len(units) - 1:
        value /= 1024.0
        index += 1
    return f"{value:.2f} {units[index]}"

if __name__ == "__main__":
    analyze_content()
