import json
import os
import re
import subprocess
import uuid
from typing import Union

from dotenv import load_dotenv
from pydantic import HttpUrl

from app.db.video_task_dao import insert_video_task, delete_task_by_video
from app.downloaders.base import Downloader
from app.downloaders.bilibili_downloader import BilibiliDownloader
from app.downloaders.douyin_downloader import DouyinDownloader
from app.downloaders.youtube_downloader import YoutubeDownloader
from app.enmus.note_enums import DownloadQuality
from app.gpt.base import GPT
from app.gpt.deepseek_gpt import DeepSeekGPT
from app.gpt.openai_gpt import OpenaiGPT
from app.gpt.qwen_gpt import QwenGPT
from app.models.gpt_model import GPTSource
from app.models.notes_model import AudioDownloadResult, NoteResult
from app.models.transcriber_model import TranscriptResult
from app.transcriber.base import Transcriber
from app.transcriber.transcriber_provider import get_transcriber
from app.transcriber.whisper import WhisperTranscriber
from app.utils.logger import get_logger
from app.utils.note_helper import replace_content_markers
from app.utils.video_helper import generate_screenshot

# from app.services.whisperer import transcribe_audio
# from app.services.gpt import summarize_text
# Module-level initialisation: logger first, then .env, then env-dependent constants.
logger = get_logger(__name__)
load_dotenv()  # must run before the os.getenv() calls below so .env values are visible
# Base URL prepended to screenshot paths so the frontend gets absolute URLs.
BACKEND_BASE_URL = os.getenv("API_BASE_URL", "http://localhost:8000")

# Screenshot output directory and the public URL prefix for serving them.
# NOTE(review): no defaults — both are None if the .env is incomplete, which would
# break insert_screenshots_into_markdown; confirm deployment always sets them.
output_dir = os.getenv('OUT_DIR')
image_base_url = os.getenv('IMAGE_BASE_URL')
logger.info("starting up")



class NoteGenerator:
    """Orchestrates the note-generation pipeline.

    Fetches media (remote platform, local video, or local audio), transcribes
    it with Whisper, summarizes the transcript with a GPT provider and
    post-processes the resulting markdown (content markers, screenshots).
    """

    def __init__(self):
        # Transcription defaults. TODO: make these configurable (original note).
        self.model_size: str = 'base'
        self.device: Union[str, None] = None
        self.transcriber_type = 'fast-whisper'
        self.transcriber = self.get_transcriber()

        # Read the correctly spelled variable first, but keep honouring the
        # historical misspelling 'MODEl_PROVIDER' for backward compatibility.
        self.provider = os.getenv('MODEL_PROVIDER') or os.getenv('MODEl_PROVIDER', 'openai')
        # Per-task video path used for screenshots; reset at the start of each
        # generate call so state never leaks between tasks.
        self.video_path: Union[str, None] = None
        logger.info("初始化NoteGenerator")

    def get_gpt(self) -> GPT:
        """Return the GPT client matching ``self.provider``.

        Unknown providers fall back to OpenAI and ``self.provider`` is
        normalised to ``'openai'``.
        """
        self.provider = self.provider.lower()
        if self.provider == 'openai':
            logger.info("使用OpenAI")
            return OpenaiGPT()
        if self.provider == 'deepseek':
            logger.info("使用DeepSeek")
            return DeepSeekGPT()
        if self.provider == 'qwen':
            logger.info("使用Qwen")
            return QwenGPT()
        # Unsupported provider: warn and fall back to OpenAI.
        self.provider = 'openai'
        logger.warning("不支持的AI提供商，使用 OpenAI 作为GPT")
        return OpenaiGPT()

    def get_downloader(self, platform: str) -> Downloader:
        """Return the downloader for ``platform``.

        :param platform: one of ``bilibili`` / ``youtube`` / ``douyin``
        :raises ValueError: if the platform is not supported
        """
        if platform == "bilibili":
            logger.info("下载 Bilibili 平台视频")
            return BilibiliDownloader()
        if platform == "youtube":
            logger.info("下载 YouTube 平台视频")
            return YoutubeDownloader()
        if platform == 'douyin':
            logger.info("下载 Douyin 平台视频")
            return DouyinDownloader()
        logger.warning("不支持的平台")
        raise ValueError(f"不支持的平台：{platform}")

    def get_transcriber(self) -> Transcriber:
        """Return the transcriber selected by ``self.transcriber_type``.

        :raises ValueError: if the configured transcriber type is unknown
        """
        if self.transcriber_type == 'fast-whisper':
            logger.info("使用Whisper")
            return get_transcriber()
        logger.warning("不支持的转义器")
        # Bug fix: the original interpolated ``self.transcriber``, which is not
        # yet assigned when this runs from __init__ (would raise AttributeError).
        raise ValueError(f"不支持的转义器：{self.transcriber_type}")

    def save_meta(self, video_id, platform, task_id):
        """Persist the (video, platform, task) mapping for later lookups."""
        logger.info("记录已经生成的数据信息")
        insert_video_task(video_id=video_id, platform=platform, task_id=task_id)

    def insert_screenshots_into_markdown(self, markdown: str, video_path: str, image_base_url: str,
                                         output_dir: str) -> str:
        """Scan ``markdown`` for *Screenshot-mm:ss markers, render a frame for
        each and replace the marker with a markdown image link.

        :param markdown: note text possibly containing screenshot markers
        :param video_path: local video file frames are captured from
        :param image_base_url: URL prefix returned to the frontend (e.g. /static/screenshots)
        :param output_dir: directory the screenshot files are written to
        :raises Exception: re-raises any failure from screenshot generation
        """
        matches = self.extract_screenshot_timestamps(markdown)
        new_markdown = markdown
        logger.info("开始为笔记生成截图")
        try:
            for idx, (marker, ts) in enumerate(matches):
                image_path = generate_screenshot(video_path, output_dir, ts, idx)
                # Normalise to forward slashes so Windows paths form valid URLs.
                image_relative_path = os.path.join(image_base_url, os.path.basename(image_path)).replace("\\", "/")
                image_url = f"{BACKEND_BASE_URL.rstrip('/')}/{image_relative_path.lstrip('/')}"
                # Replace only the first occurrence so each marker keeps its own frame.
                new_markdown = new_markdown.replace(marker, f"![]({image_url})", 1)
            return new_markdown
        except Exception as e:
            logger.error(f"截图生成失败：{e}")
            raise  # preserve the original traceback

    @staticmethod
    def delete_note(video_id: str, platform: str):
        """Delete the stored note record for ``video_id`` on ``platform``."""
        logger.info("删除生成的笔记记录")
        return delete_task_by_video(video_id, platform)

    def extract_screenshot_timestamps(self, markdown: str) -> list[tuple[str, int]]:
        """Extract screenshot time markers from markdown.

        Matches both ``*Screenshot-03:39`` and ``Screenshot-[03:39]`` and
        returns ``(matched_text, seconds)`` pairs in document order.
        """
        logger.info("开始提取截图时间标记")
        pattern = r"(?:\*Screenshot-(\d{2}):(\d{2})|Screenshot-\[(\d{2}):(\d{2})\])"
        results = []
        for match in re.finditer(pattern, markdown):
            # Groups 1/2 belong to the bare form, 3/4 to the bracketed form.
            mm = match.group(1) or match.group(3)
            ss = match.group(2) or match.group(4)
            results.append((match.group(0), int(mm) * 60 + int(ss)))
        return results

    @staticmethod
    def _probe_duration(media_path: str, kind: str = "视频") -> float:
        """Return the duration of ``media_path`` in seconds via ffprobe.

        :param kind: label used in log messages ("视频" or "音频")
        :return: duration in seconds, or 0.0 if probing fails
        """
        try:
            duration_cmd = [
                'ffprobe',
                '-v', 'error',
                '-show_entries', 'format=duration',
                '-of', 'json',
                media_path
            ]
            duration_result = subprocess.run(duration_cmd, capture_output=True, text=True, check=True)
            duration_data = json.loads(duration_result.stdout)
            duration = float(duration_data['format']['duration'])
            logger.info(f"{kind}时长: {duration} 秒")
            return duration
        except Exception as e:
            # Best-effort: a missing duration should not abort note generation.
            logger.warning(f"无法获取{kind}时长: {e}")
            return 0.0

    @staticmethod
    def _make_temp_wav_path(source_path: str, task_id: Union[str, None]) -> str:
        """Build a unique .wav target path under static/tempAudio for ``source_path``."""
        audio_dir = os.path.join("static", "tempAudio")
        os.makedirs(audio_dir, exist_ok=True)
        base_name = os.path.splitext(os.path.basename(source_path))[0]
        # task_id keeps retries stable; otherwise a UUID avoids collisions.
        wav_filename = f"{base_name}_{task_id if task_id else str(uuid.uuid4())}.wav"
        return os.path.join(audio_dir, wav_filename)

    def generate(
            self,
            video_url: Union[str, HttpUrl],
            platform: str,
            quality: DownloadQuality = DownloadQuality.medium,
            task_id: Union[str, None] = None,
            link: bool = False,
            screenshot: bool = False,
            path: Union[str, None] = None
    ) -> NoteResult:
        """Download a platform video, transcribe it and build a markdown note.

        :param video_url: source video URL
        :param platform: ``bilibili`` / ``youtube`` / ``douyin``
        :param quality: requested audio download quality
        :param task_id: optional task identifier persisted with the result
        :param link: whether GPT should embed content links
        :param screenshot: whether to download the video and insert screenshots
        :param path: optional output directory for the download
        :raises ValueError: for unsupported platforms
        """
        logger.info("开始解析并生成笔记")
        # Bug fix: reset per-task state so a stale video path from a previous
        # screenshot run cannot trigger screenshot insertion on this run.
        self.video_path = None
        # 1. Pick collaborators.
        downloader = self.get_downloader(platform)
        gpt = self.get_gpt()
        logger.info(f'使用{downloader.__class__.__name__}下载器')
        logger.info(f'使用{gpt.__class__.__name__}GPT')
        logger.info(f'视频地址：{video_url}')
        if screenshot:
            # Screenshots need the full video file, not just the audio track.
            self.video_path = downloader.download_video(video_url)
            logger.info(f'视频文件路径：{self.video_path}')

        # 2. Download the audio track.
        audio: AudioDownloadResult = downloader.download(
            video_url=video_url,
            quality=quality,
            output_dir=path,
            need_video=screenshot
        )
        logger.info(f"下载音频成功，文件路径：{audio.file_path}")
        # 3. Whisper transcription.
        transcript: TranscriptResult = self.transcriber.transcript(file_path=audio.file_path)
        logger.info(f"Whisper 转写成功，转写结果：{transcript.full_text}")
        # 4. GPT summary.
        source = GPTSource(
            title=audio.title,
            segment=transcript.segments,
            tags=audio.raw_info.get('tags'),
            screenshot=screenshot,
            link=link
        )
        logger.info(f"GPT 总结完成，总结结果：{source}")
        markdown: str = gpt.summarize(source)
        logger.info("markdown结果生成完成")

        markdown = replace_content_markers(markdown=markdown, video_id=audio.video_id, platform=platform)
        if self.video_path:
            markdown = self.insert_screenshots_into_markdown(markdown, self.video_path, image_base_url, output_dir)
        self.save_meta(video_id=audio.video_id, platform=platform, task_id=task_id)
        # 5. Return the structured result.
        return NoteResult(
            markdown=markdown,
            transcript=transcript,
            audio_meta=audio
        )

    def generate_from_local(
            self,
            video_path: str,
            quality: DownloadQuality = DownloadQuality.medium,
            task_id: Union[str, None] = None,
            link: bool = False,  # unused for local video; kept for interface compatibility
            screenshot: bool = False
    ) -> NoteResult:
        """Generate a note directly from a local video file.

        Extracts a 16 kHz mono wav via ffmpeg, then runs the same
        transcribe → summarize → post-process pipeline as :meth:`generate`.
        """
        logger.info("开始处理本地视频文件并生成笔记")
        gpt = self.get_gpt()
        logger.info(f'使用{gpt.__class__.__name__}GPT')
        logger.info(f'本地视频路径：{video_path}')

        duration = self._probe_duration(video_path, "视频")

        video_filename = os.path.basename(video_path)
        audio_path = self._make_temp_wav_path(video_path, task_id)

        # Extract a 16 kHz mono PCM track (Whisper's preferred input format).
        cmd = [
            'ffmpeg', '-i', video_path,
            '-vn', '-acodec', 'pcm_s16le', '-ar', '16000', '-ac', '1',
            audio_path
        ]

        try:
            subprocess.run(cmd, check=True)
            logger.info(f"音频提取成功，文件路径：{audio_path}")

            video_id = f"local_{task_id}" if task_id else f"local_{os.path.splitext(video_filename)[0]}"

            # Bug fix: always (re)assign so a stale path from an earlier run
            # cannot leak into this task when screenshot=False.
            self.video_path = video_path if screenshot else None

            # Synthesize the metadata a downloader would normally produce.
            audio = AudioDownloadResult(
                title=video_filename,
                file_path=audio_path,
                video_id=video_id,
                raw_info={'tags': [], 'title': video_filename},
                duration=duration,
                cover_url=None,  # local files have no cover image
                platform="local"
            )

            transcript: TranscriptResult = self.transcriber.transcript(file_path=audio.file_path)
            logger.info(f"Whisper 转写成功，转写结果：{transcript.full_text}")

            source = GPTSource(
                title=audio.title,
                segment=transcript.segments,
                tags=audio.raw_info.get('tags'),
                screenshot=screenshot,
                link=False  # links are not supported for local video
            )
            logger.info(f"准备GPT总结，源数据：{source}")
            markdown: str = gpt.summarize(source)
            logger.info("markdown结果生成完成")

            markdown = replace_content_markers(markdown=markdown, video_id=audio.video_id, platform="local")
            if self.video_path:
                markdown = self.insert_screenshots_into_markdown(markdown, self.video_path, image_base_url, output_dir)

            note_result = NoteResult(
                title=audio.title,
                markdown=markdown,
                raw_text=transcript.full_text,
                video_id=audio.video_id,
                platform="local",
                transcript=transcript,
                audio_meta=audio
            )

            if task_id:
                self.save_meta(video_id=audio.video_id, platform="local", task_id=task_id)
            return note_result
        except Exception as e:
            logger.error(f"本地视频处理失败：{e}")
            # Keep the extracted audio file around for debugging.
            raise

    def generate_from_audio(
            self,
            audio_path: str,
            task_id: Union[str, None] = None,
            screenshot: bool = False  # ignored for audio; kept for interface compatibility
    ) -> NoteResult:
        """Generate a note directly from a local audio file.

        Non-wav inputs are first converted to 16 kHz mono wav via ffmpeg.
        """
        logger.info("开始处理本地音频文件并生成笔记")
        gpt = self.get_gpt()
        logger.info(f'使用{gpt.__class__.__name__}GPT')
        logger.info(f'本地音频路径：{audio_path}')

        duration = self._probe_duration(audio_path, "音频")

        # Convert to wav only when needed; wav inputs are transcribed in place.
        temp_wav_path = audio_path
        if os.path.splitext(audio_path)[1].lower() != '.wav':
            temp_wav_path = self._make_temp_wav_path(audio_path, task_id)
            convert_cmd = [
                'ffmpeg', '-i', audio_path,
                '-acodec', 'pcm_s16le', '-ar', '16000', '-ac', '1',
                temp_wav_path
            ]
            try:
                subprocess.run(convert_cmd, check=True)
                logger.info(f"音频转换成功，新文件路径：{temp_wav_path}")
            except Exception as e:
                logger.error(f"音频转换失败：{e}")
                raise

        try:
            audio_filename = os.path.basename(audio_path)
            audio_id = f"audio_{task_id}" if task_id else f"audio_{os.path.splitext(audio_filename)[0]}"

            audio = AudioDownloadResult(
                title=audio_filename,
                file_path=temp_wav_path,
                video_id=audio_id,  # audio id is stored in the video_id field
                raw_info={'tags': [], 'title': audio_filename},
                duration=duration,
                cover_url=None,
                platform="audio"
            )

            transcript: TranscriptResult = self.transcriber.transcript(file_path=audio.file_path)
            logger.info(f"Whisper 转写成功，转写结果：{transcript.full_text}")

            source = GPTSource(
                title=audio.title,
                segment=transcript.segments,
                tags=audio.raw_info.get('tags'),
                screenshot=False,  # screenshots are not supported for audio
                link=False  # links are not supported for audio
            )
            logger.info(f"准备GPT总结，源数据：{source}")
            markdown: str = gpt.summarize(source)
            logger.info("markdown结果生成完成")

            markdown = replace_content_markers(markdown=markdown, video_id=audio.video_id, platform="audio")

            note_result = NoteResult(
                title=audio.title,
                markdown=markdown,
                raw_text=transcript.full_text,
                video_id=audio.video_id,
                platform="audio",
                transcript=transcript,
                audio_meta=audio
            )

            if task_id:
                self.save_meta(video_id=audio.video_id, platform="audio", task_id=task_id)
            return note_result
        except Exception as e:
            logger.error(f"本地音频处理失败：{e}")
            raise