#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
导出搜索结果脚本
"""

import asyncio
import sys
import os

# Make the project root (this file's directory) importable so the local
# ``crawlers`` and ``utils`` packages resolve when the script is run directly.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from crawlers.douyin.web.web_crawler import DouyinWebCrawler
from utils.export_manager import export_manager


async def export_search_results():
    """Search Douyin videos for a fixed keyword and export the results.

    Runs the web crawler for a hard-coded keyword/count, prints a short
    preview of the matches, then writes the full data set to JSON and CSV
    via ``export_manager`` and reports file sizes plus aggregate totals.

    This is a top-level script entry point: every exception is caught,
    reported to stdout, and its traceback printed, so nothing propagates
    back out of ``asyncio.run``.
    """
    print("开始搜索并导出视频数据...")

    try:
        crawler = DouyinWebCrawler()

        # Search parameters — hard-coded defaults for this script.
        keyword = "美食"
        count = 3

        print(f"\n搜索关键字: {keyword}")
        print(f"获取数量: {count}")
        print("-" * 50)

        # Run the search.
        print("正在搜索视频...")
        search_data = await crawler.fetch_videos_complete_data(keyword, count)

        # Extract the video list once; bail out early if the crawler
        # returned nothing usable (None or an empty/missing 'videos' key).
        videos = search_data.get('videos', []) if search_data else []
        if not videos:
            print("未找到相关视频")
            return

        print(f"找到 {len(videos)} 个视频")

        # Preview: show at most the first three results.
        print("\n搜索结果预览:")
        for i, video in enumerate(videos[:3], 1):
            print(f"{i}. {video.get('desc', '无描述')[:50]}...")
            print(f"   作者: {video.get('author', {}).get('nickname', '未知')}")
            print(f"   点赞: {video.get('statistics', {}).get('digg_count', 0):,}")

        if len(videos) > 3:
            print(f"   ... 还有 {len(videos) - 3} 个视频")

        # Export the full data set in both formats.
        print("\n正在导出数据...")

        json_path = export_manager.export_to_json(search_data, keyword)
        print(f"JSON文件: {json_path}")

        csv_path = export_manager.export_to_csv(search_data, keyword)
        print(f"CSV文件: {csv_path}")

        # Report on-disk sizes in KB.
        json_size = os.path.getsize(json_path) / 1024
        csv_size = os.path.getsize(csv_path) / 1024

        print("\n文件信息:")
        print(f"JSON文件大小: {json_size:.1f} KB")
        print(f"CSV文件大小: {csv_size:.1f} KB")

        # Aggregate statistics computed by the export manager.
        summary = export_manager.get_export_summary(search_data)
        print("\n导出摘要:")
        print(f"总视频数: {summary['total_videos']}")
        print(f"总评论数: {summary['total_comments']}")
        print(f"总点赞数: {summary['total_likes']:,}")

        print("\n导出完成！")
        print(f"文件位置: {os.path.dirname(json_path)}")

    except Exception as e:
        # Top-level boundary for a CLI script: report and dump the
        # traceback instead of letting the exception escape asyncio.run().
        print(f"导出失败: {str(e)}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    # 运行导出
    asyncio.run(export_search_results())