import asyncio
import os
import time

import yaml
from pywebio.input import *
from pywebio.output import *
from pywebio import start_server
from pywebio.platform import run_event_loop

from app.web.views.ViewsUtils import ViewsUtils
from crawlers.douyin.web.web_crawler import DouyinWebCrawler
from utils.export_manager import export_manager

# Crawler instance shared by every search session served from this view.
douyin_crawler = DouyinWebCrawler()

# Load the project-level config.yaml, located four directories above this file.
config_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), 'config.yaml')
with open(config_path, 'r', encoding='utf-8') as file:
    config = yaml.safe_load(file)


class VideoSearch:
    """PyWebIO view for searching Douyin videos and exporting the results.

    Renders a search form (keyword / count / sort / export formats), runs the
    crawler's coroutine on a private event loop inside a worker thread (PyWebIO
    callbacks are synchronous), then displays the results and optionally
    exports them through ``export_manager``.
    """

    def __init__(self):
        # Payload dict of the most recent successful search, or None.
        self.search_results = None
        # Keyword of the most recent search.
        self.current_keyword = ""

    def search_interface(self):
        """Render the search form and start a search with the submitted values."""
        # Drop any previously rendered output for this session.
        clear()

        # Page header.
        put_html("<h1>🔍 抖音视频搜索</h1>")
        put_html("<hr>")

        # Search form.
        # NOTE(review): pywebio option tuples are (label, value); with
        # (3, "3") the submitted "count" may arrive as the string value —
        # confirm against the crawler's expectations.
        data = input_group(ViewsUtils.t("搜索视频", "Search Videos"), [
            input(
                name="keyword",
                label=ViewsUtils.t("搜索关键字", "Search Keyword"),
                placeholder=ViewsUtils.t("请输入搜索关键字，如：美食、旅游等", "Enter search keyword, e.g.: food, travel"),
                required=True,
                validate=self.validate_keyword
            ),
            select(
                name="count",
                label=ViewsUtils.t("获取数量", "Number of Videos"),
                options=[
                    (3, "3"),
                    (5, "5"),
                    (10, "10")
                ],
                value=5
            ),
            select(
                name="sort_type",
                label=ViewsUtils.t("排序方式", "Sort Type"),
                options=[
                    ("综合排序", "综合排序"),
                    ("最新发布", "最新发布"),
                    ("最多点赞", "最多点赞")
                ],
                value="综合排序",
                help_text="选择视频排序方式"
            ),
            checkbox(
                name="export_format",
                label=ViewsUtils.t("导出格式", "Export Format"),
                options=[
                    ("json", "JSON"),
                    ("csv", "CSV")
                ],
                value=["json", "csv"]
            )
        ])

        # Remember the keyword for later display / export naming.
        self.current_keyword = data["keyword"]

        # Map the Chinese sort labels to the crawler's numeric sort codes.
        sort_type_map = {
            "综合排序": 0,
            "最新发布": 1,
            "最多点赞": 2
        }
        sort_type = sort_type_map.get(data["sort_type"], 0)

        self.start_search(
            keyword=data["keyword"],
            count=data["count"],
            sort_type=sort_type,
            export_formats=data["export_format"]
        )

    def validate_keyword(self, keyword):
        """Validate the search keyword.

        Returns a localized error message string when invalid (empty or
        longer than 50 characters), or None when the keyword is acceptable.
        """
        if not keyword or len(keyword.strip()) == 0:
            return ViewsUtils.t("关键字不能为空", "Keyword cannot be empty")

        if len(keyword.strip()) > 50:
            return ViewsUtils.t("关键字太长，请控制在50字符以内", "Keyword is too long, please keep it within 50 characters")

        return None

    def start_search(self, keyword: str, count: int, sort_type: int, export_formats: list):
        """Show a summary of the search parameters, then run the search."""
        put_html("<hr>")
        put_html("<h2>🚀 开始搜索...</h2>")

        # Echo the submitted parameters back to the user.
        put_table([
            [ViewsUtils.t("搜索关键字", "Keyword"), keyword],
            [ViewsUtils.t("获取数量", "Count"), str(count)],
            [ViewsUtils.t("排序方式", "Sort"), self.get_sort_name(sort_type)],
            [ViewsUtils.t("导出格式", "Export Format"), ", ".join(export_formats)]
        ])

        self.perform_search(keyword, count, sort_type, export_formats)

    def perform_search(self, keyword: str, count: int, sort_type: int, export_formats: list):
        """Run the search synchronously by driving the crawler in a worker thread.

        PyWebIO callbacks are synchronous, so the crawler's coroutine is
        executed on its own event loop inside a daemon thread while this
        method polls it and advances the progress bar.
        """
        import threading

        try:
            # Progress UI.
            put_processbar('search_progress')
            set_processbar('search_progress', 0.1)
            put_text(ViewsUtils.t("正在搜索视频...", "Searching videos..."))

            print(f"Web搜索开始: 关键词={keyword}, 数量={count}")

            # Results are handed back from the worker through these slots.
            search_data = None
            search_error = None

            def thread_search():
                """Execute the async crawler call on a private event loop."""
                nonlocal search_data, search_error
                loop = None
                try:
                    print("线程搜索开始")
                    loop = asyncio.new_event_loop()
                    asyncio.set_event_loop(loop)

                    async def do_search():
                        # NOTE(review): sort_type is not forwarded — this
                        # crawler call only takes keyword/count. TODO confirm
                        # whether the API supports a sort parameter.
                        return await douyin_crawler.fetch_videos_complete_data(keyword, count)

                    search_data = loop.run_until_complete(
                        asyncio.wait_for(do_search(), timeout=60.0)  # hard cap on the crawler itself
                    )
                    print("线程搜索完成")

                except Exception as e:
                    search_error = str(e)
                    print(f"线程搜索异常: {e}")
                    import traceback
                    traceback.print_exc()
                finally:
                    # Always release the private loop — the original leaked
                    # it whenever the crawler raised.
                    if loop is not None:
                        loop.close()

            # Launch the worker as a daemon so a hung crawler cannot block
            # process shutdown.
            search_thread = threading.Thread(target=thread_search)
            search_thread.daemon = True
            search_thread.start()

            # Poll the worker, advancing the bar from 10% to 70%. The wall
            # limit is slightly above the crawler's own 60s timeout so the
            # in-thread TimeoutError normally fires first.
            max_wait_time = 70
            wait_time = 0

            while search_thread.is_alive() and wait_time < max_wait_time:
                time.sleep(0.5)
                wait_time += 0.5
                progress = 0.1 + (wait_time / max_wait_time) * 0.6
                set_processbar('search_progress', progress)

            if search_thread.is_alive():
                print("Web搜索超时，线程仍在运行")
                # Bug fix: the message previously claimed a 20-second timeout
                # while the actual wait is max_wait_time seconds.
                self.show_error(ViewsUtils.t(
                    f"搜索超时（{max_wait_time}秒），请稍后重试",
                    f"Search timeout ({max_wait_time}s), please try again"))
                return

            set_processbar('search_progress', 0.8)

            if search_error:
                print(f"Web搜索异常: {search_error}")
                self.show_error(f"{ViewsUtils.t('搜索失败', 'Search failed')}: {search_error}")
                return

            if not search_data:
                print("Web搜索: 搜索返回空结果")
                self.show_error(ViewsUtils.t("搜索失败，请稍后重试", "Search failed, please try again"))
                return

            if len(search_data.get('videos', [])) == 0:
                print("Web搜索: 未找到相关视频")
                self.show_error(ViewsUtils.t("未找到相关视频，请稍后重试", "No videos found, please try again"))
                return

            print(f"Web搜索成功: 找到{len(search_data.get('videos', []))}个视频")

            # Render the results.
            set_processbar('search_progress', 0.9)
            put_text(ViewsUtils.t("正在处理数据...", "Processing data..."))

            self.search_results = search_data
            self.display_results(search_data)

            # Export if any format was selected.
            if export_formats:
                set_processbar('search_progress', 0.95)
                put_text(ViewsUtils.t("正在导出数据...", "Exporting data..."))
                print(f"Web导出格式: {export_formats}")
                export_info = self.export_data(search_data, keyword, export_formats)
                print(f"Web导出结果: {export_info}")
                self.display_export_info(export_info)

            set_processbar('search_progress', 1.0)
            put_success(ViewsUtils.t("搜索完成！", "Search completed!"))

        except Exception as e:
            print(f"Web搜索异常: {str(e)}")
            import traceback
            traceback.print_exc()
            self.show_error(f"{ViewsUtils.t('搜索失败', 'Search failed')}: {str(e)}")

    async def async_search(self, keyword: str, count: int, sort_type: int, export_formats: list):
        """Async variant of :meth:`perform_search` without the thread/timeout layer.

        NOTE(review): appears unused by this view (the form path calls
        ``perform_search``); kept for backward compatibility — confirm no
        external caller before removing.
        """
        try:
            # Progress UI.
            put_processbar('search_progress')
            set_processbar('search_progress', 0.1)
            put_text(ViewsUtils.t("正在搜索视频...", "Searching videos..."))

            # Run the crawler directly on the current loop.
            search_data = await douyin_crawler.fetch_videos_complete_data(keyword, count)
            set_processbar('search_progress', 0.8)

            if not search_data or len(search_data.get('videos', [])) == 0:
                self.show_error(ViewsUtils.t("未找到相关视频", "No videos found"))
                return

            # Render the results.
            set_processbar('search_progress', 0.9)
            put_text(ViewsUtils.t("正在处理数据...", "Processing data..."))

            self.search_results = search_data
            self.display_results(search_data)

            # Export if any format was selected.
            if export_formats:
                set_processbar('search_progress', 0.95)
                put_text(ViewsUtils.t("正在导出数据...", "Exporting data..."))
                export_info = self.export_data(search_data, keyword, export_formats)
                self.display_export_info(export_info)

            set_processbar('search_progress', 1.0)
            put_success(ViewsUtils.t("搜索完成！", "Search completed!"))

        except Exception as e:
            self.show_error(f"{ViewsUtils.t('搜索失败', 'Search failed')}: {str(e)}")

    def display_results(self, search_data: dict):
        """Render the search summary, one collapsible panel per video, and action buttons."""
        put_html("<hr>")
        put_html("<h2>📊 搜索结果</h2>")

        search_info = search_data.get('search_info', {})
        videos = search_data.get('videos', [])

        # Search summary table.
        put_table([
            [ViewsUtils.t("搜索关键字", "Keyword"), search_info.get('keyword', '')],
            [ViewsUtils.t("找到视频数", "Videos Found"), str(search_info.get('total_found', 0))],
            [ViewsUtils.t("搜索时间", "Search Time"), search_info.get('search_time', '')]
        ])

        # One collapsible section per video, titled with a description preview.
        for i, video in enumerate(videos, 1):
            with put_collapse(f"📹 视频 {i}: {video.get('desc', '无描述')[:30]}..."):
                self.display_single_video(video, i)

        # Follow-up actions.
        put_html("<hr>")
        put_buttons([
            {'label': ViewsUtils.t('重新搜索', 'Search Again'), 'value': 'search_again'},
            {'label': ViewsUtils.t('返回主页', 'Back to Home'), 'value': 'back_home'}
        ], onclick=self.handle_button_click)

    def display_single_video(self, video: dict, index: int):
        """Render one video's details: info, stats, download/API links, comment preview."""
        author = video.get('author', {})
        stats = video.get('statistics', {})
        urls = video.get('video_urls', {})
        api_urls = video.get('api_urls', {})
        comments = video.get('comments', [])

        # Basic information.
        put_html("<h4>📝 基本信息</h4>")
        put_table([
            [ViewsUtils.t("视频ID", "Video ID"), video.get('aweme_id', '')],
            [ViewsUtils.t("视频描述", "Description"), video.get('desc', '无描述')],
            [ViewsUtils.t("作者昵称", "Author"), author.get('nickname', '')],
            [ViewsUtils.t("作者ID", "Author ID"), author.get('sec_user_id', '')]
        ])

        # Engagement statistics.
        put_html("<h4>📈 统计数据</h4>")
        put_table([
            [ViewsUtils.t("点赞数", "Likes"), f"{stats.get('digg_count', 0):,}"],
            [ViewsUtils.t("评论数", "Comments"), f"{stats.get('comment_count', 0):,}"],
            [ViewsUtils.t("分享数", "Shares"), f"{stats.get('share_count', 0):,}"],
            [ViewsUtils.t("播放数", "Views"), f"{stats.get('play_count', 0):,}"]
        ])

        # Download links (only rows whose URL is present).
        put_html("<h4>📥 下载链接</h4>")
        download_links = []
        if urls.get('no_watermark'):
            download_links.append([ViewsUtils.t("无水印视频", "No Watermark"),
                                 put_link(ViewsUtils.t("点击下载", "Download"), urls['no_watermark'], new_window=True)])
        if urls.get('with_watermark'):
            download_links.append([ViewsUtils.t("有水印视频", "With Watermark"),
                                 put_link(ViewsUtils.t("点击下载", "Download"), urls['with_watermark'], new_window=True)])

        if download_links:
            put_table(download_links)

        # API links (only rows whose URL is present).
        put_html("<h4>🔗 API链接</h4>")
        api_links = []
        if api_urls.get('full'):
            api_links.append([ViewsUtils.t("完整API", "Full API"),
                            put_link(ViewsUtils.t("点击访问", "Visit"), api_urls['full'], new_window=True)])
        if api_urls.get('minimal'):
            api_links.append([ViewsUtils.t("精简API", "Minimal API"),
                            put_link(ViewsUtils.t("点击访问", "Visit"), api_urls['minimal'], new_window=True)])

        if api_links:
            put_table(api_links)

        # Comment preview (first 5, each truncated to 100 characters).
        if comments:
            put_html("<h4>💬 评论预览 (前5条)</h4>")
            comment_rows = []
            for comment in comments[:5]:
                comment_text = comment.get('text', '无内容')[:100]
                if len(comment.get('text', '')) > 100:
                    comment_text += "..."

                comment_rows.append([
                    comment.get('user', {}).get('nickname', '匿名用户'),
                    comment_text,
                    f"{comment.get('digg_count', 0):,}"
                ])

            put_table([
                [ViewsUtils.t("用户", "User"), ViewsUtils.t("评论内容", "Content"), ViewsUtils.t("点赞", "Likes")]
            ] + comment_rows)

    def export_data(self, search_data: dict, keyword: str, formats: list) -> dict:
        """Export the search data in the requested formats via ``export_manager``.

        Returns ``{"success": True, "paths": {...}, "summary": {...}}`` on
        success or ``{"success": False, "error": str}`` on failure.
        """
        export_paths = {}

        try:
            print(f"开始导出数据: 关键词={keyword}, 格式={formats}")

            # Compare case-insensitively so "JSON"/"json" both match.
            formats_lower = [fmt.lower() for fmt in formats]
            print(f"转换后的格式: {formats_lower}")

            if "json" in formats_lower:
                json_path = export_manager.export_to_json(search_data, keyword)
                export_paths["json"] = json_path
                print(f"JSON导出成功: {json_path}")
                # Sanity-check that the exporter really wrote the file.
                if os.path.exists(json_path):
                    print(f"JSON文件确认存在，大小: {os.path.getsize(json_path)} bytes")
                else:
                    print(f"警告: JSON文件不存在于: {json_path}")

            if "csv" in formats_lower:
                csv_path = export_manager.export_to_csv(search_data, keyword)
                export_paths["csv"] = csv_path
                print(f"CSV导出成功: {csv_path}")
                # Sanity-check that the exporter really wrote the file.
                if os.path.exists(csv_path):
                    print(f"CSV文件确认存在，大小: {os.path.getsize(csv_path)} bytes")
                else:
                    print(f"警告: CSV文件不存在于: {csv_path}")

            result = {
                "success": True,
                "paths": export_paths,
                "summary": export_manager.get_export_summary(search_data)
            }
            print(f"导出完成，结果: {result}")
            return result

        except Exception as e:
            print(f"导出数据异常: {str(e)}")
            import traceback
            traceback.print_exc()
            return {
                "success": False,
                "error": str(e)
            }

    def display_export_info(self, export_info: dict):
        """Render the export summary and file list produced by :meth:`export_data`."""
        try:
            print("开始显示导出信息")
            put_html("<hr>")
            put_html("<h2>导出信息</h2>")

            if export_info.get("success"):
                paths = export_info.get("paths", {})
                summary = export_info.get("summary", {})

                print(f"导出路径: {paths}")
                print(f"导出摘要: {summary}")

                # Aggregate counts for the exported data set.
                put_table([
                    [ViewsUtils.t("总视频数", "Total Videos"), str(summary.get('total_videos', 0))],
                    [ViewsUtils.t("总评论数", "Total Comments"), str(summary.get('total_comments', 0))],
                    [ViewsUtils.t("总点赞数", "Total Likes"), f"{summary.get('total_likes', 0):,}"]
                ])

                # List of written files (basename only; not a download link).
                put_html("<h4>导出文件</h4>")

                if paths:
                    for format_type, file_path in paths.items():
                        file_name = os.path.basename(file_path)
                        print(f"显示文件: {format_type} -> {file_name}")
                        put_html(f"[成功] {format_type.upper()}: {file_name}")
                else:
                    print("警告: 没有导出路径信息")
                    put_html("<p>警告: 没有找到导出文件信息</p>")

            else:
                error_msg = export_info.get('error', 'Unknown error')
                print(f"导出失败: {error_msg}")
                put_error(f"{ViewsUtils.t('导出失败', 'Export failed')}: {error_msg}")

            print("导出信息显示完成")

        except Exception as e:
            print(f"display_export_info异常: {str(e)}")
            import traceback
            traceback.print_exc()
            put_error(f"显示导出信息时出错: {str(e)}")

    def show_error(self, message: str):
        """Render an error banner with a retry button."""
        put_html("<hr>")
        put_error(message)
        put_buttons([
            {'label': ViewsUtils.t('重新搜索', 'Search Again'), 'value': 'search_again'}
        ], onclick=self.handle_button_click)

    def handle_button_click(self, choice):
        """Dispatch the result-page buttons ('search_again' / 'back_home')."""
        if choice == 'search_again':
            self.search_interface()
        elif choice == 'back_home':
            # Routing back to the main page depends on the host app; placeholder.
            put_html("<h2>返回主页功能待实现</h2>")

    def get_sort_name(self, sort_type: int) -> str:
        """Return the Chinese label for a numeric sort code (default: 综合排序)."""
        sort_names = {
            0: "综合排序",
            1: "最新发布",
            2: "最多点赞"
        }
        return sort_names.get(sort_type, "综合排序")


def video_search_interface():
    """Entry point for the video-search page: build the view and render the form."""
    VideoSearch().search_interface()


if __name__ == "__main__":
    # Allow running this search view standalone (outside the main app router).
    start_server(video_search_interface, port=8081, debug=True)