import copy
import json
import logging
import math
from datetime import timedelta
from typing import List, Optional

from app.gpt.base import GPT
from app.gpt.prompt import get_prompts
from app.gpt.prompt_builder import generate_base_prompt
from app.gpt.utils import fix_markdown
from app.models.gpt_model import GPTSource
from app.models.transcriber_model import TranscriptSegment

logger = logging.getLogger(__name__)

class UniversalGPT(GPT):
    """Chat-completion based note generator with chunking, retry and size-reduction support."""

    def __init__(self, client, model: str, temperature: float = 0.7):
        """Store the API client and generation settings.

        Args:
            client: OpenAI-compatible chat client (must expose ``chat.completions.create``).
            model: Model identifier passed through on every request.
            temperature: Sampling temperature for regular calls.
        """
        self.client = client
        self.model = model
        self.temperature = temperature
        # Per-request feature flags; summarize() overwrites these from the source.
        self.screenshot = False
        self.link = False
        # Keep chunks small so each request stays comfortably inside limits.
        self.max_tokens_per_chunk = 500
        self.max_retries = 3
        # Request payload cap, raised to 100 KB.
        self.max_request_size = 100000
        logger.info(f"UniversalGPT 初始化完成，使用模型: {model}")

    def _format_time(self, seconds: float) -> str:
        return str(timedelta(seconds=int(seconds)))[2:]

    def _build_segment_text(self, segments: List[TranscriptSegment]) -> str:
        return "\n".join(
            f"{self._format_time(seg.start)} - {seg.text.strip()}"
            for seg in segments
        )

    def _estimate_request_size(self, messages) -> int:
        """估算请求大小（字节）"""
        try:
            return len(json.dumps(messages).encode('utf-8'))
        except Exception as e:
            logger.error(f"估算请求大小失败: {e}")
            return 0

    def _reduce_prompt_size(self, messages, max_size: int = None):
        """智能减小prompt大小"""
        if not messages or not isinstance(messages, list):
            return messages

        if max_size is None:
            max_size = self.max_request_size

        try:
            current_size = self._estimate_request_size(messages)
            if current_size <= max_size:
                return messages

            # 1. 首先尝试移除不必要的内容
            messages = self._remove_unnecessary_content(messages)
            current_size = self._estimate_request_size(messages)
            if current_size <= max_size:
                return messages

            # 2. 如果还是太大，尝试截断内容
            for message in messages:
                if isinstance(message.get('content'), str):
                    content_length = len(message['content'])
                    # 计算需要保留的比例
                    ratio = max_size / current_size * 0.9  # 留10%余量
                    new_length = int(content_length * ratio)
                    # 确保不会截断得太短
                    new_length = max(new_length, 1000)
                    message['content'] = message['content'][:new_length]

            return messages
        except Exception as e:
            logger.error(f"减小prompt大小失败: {e}")
            return messages

    def _remove_unnecessary_content(self, messages):
        """移除不必要的内容"""
        if not messages:
            return messages

        try:
            for message in messages:
                if isinstance(message.get('content'), list):
                    # 移除图片URL等非文本内容
                    message['content'] = [
                        item for item in message['content']
                        if item.get('type') == 'text'
                    ]
                elif isinstance(message.get('content'), str):
                    # 移除可能的重复内容
                    lines = message['content'].split('\n')
                    unique_lines = []
                    seen = set()
                    for line in lines:
                        if line not in seen:
                            unique_lines.append(line)
                            seen.add(line)
                    message['content'] = '\n'.join(unique_lines)
            return messages
        except Exception as e:
            logger.error(f"移除不必要内容失败: {e}")
            return messages

    def ensure_segments_type(self, segments) -> List[TranscriptSegment]:
        return [TranscriptSegment(**seg) if isinstance(seg, dict) else seg for seg in segments]

    def _split_segments(self, segments: List[TranscriptSegment], chunk_size: int) -> List[List[TranscriptSegment]]:
        """
        将segments分成多个小块，使用更激进的分块策略
        """
        chunks = []
        current_chunk = []
        current_length = 0
        max_chunk_size = chunk_size * 2  # 字符数限制
        
        for segment in segments:
            segment_length = len(segment.text)
            
            # 如果单个segment太大，需要拆分
            if segment_length > max_chunk_size:
                if current_chunk:
                    chunks.append(current_chunk)
                    current_chunk = []
                    current_length = 0
                
                # 将大的segment拆分成更小的部分
                words = segment.text.split()
                temp_text = ""
                temp_segments = []
                
                for word in words:
                    if len(temp_text) + len(word) + 1 <= max_chunk_size:
                        temp_text += " " + word if temp_text else word
                    else:
                        temp_segments.append(
                            TranscriptSegment(
                                start=segment.start,
                                end=segment.end,
                                text=temp_text.strip()
                            )
                        )
                        temp_text = word
                
                if temp_text:
                    temp_segments.append(
                        TranscriptSegment(
                            start=segment.start,
                            end=segment.end,
                            text=temp_text.strip()
                        )
                    )
                
                chunks.extend([[seg] for seg in temp_segments])
                continue
            
            # 检查是否需要开始新的chunk
            if current_length + segment_length > max_chunk_size:
                if current_chunk:
                    chunks.append(current_chunk)
                current_chunk = [segment]
                current_length = segment_length
            else:
                current_chunk.append(segment)
                current_length += segment_length
        
        if current_chunk:
            chunks.append(current_chunk)
        
        return chunks

    def _try_api_call(self, messages, chunk_size):
        """尝试API调用，包含完整的错误处理和重试逻辑"""
        original_messages = messages.copy()
        last_error = None
        
        for i in range(self.max_retries):
            try:
                # 预检查请求大小
                request_size = self._estimate_request_size(messages)
                if request_size > self.max_request_size:
                    logger.warning(f"请求大小 ({request_size} bytes) 超过限制 ({self.max_request_size} bytes)")
                    messages = self._reduce_prompt_size(messages)
                    continue

                response = self.client.chat.completions.create(
                    model=self.model,
                    messages=messages,
                    temperature=self.temperature
                )
                
                if not response or not response.choices:
                    raise Exception("API返回为空")
                    
                result = response.choices[0].message.content.strip()
                if not result:
                    raise Exception("API返回内容为空")
                    
                return result
                
            except Exception as e:
                last_error = e
                logger.error(f"API调用失败 (attempt {i+1}/{self.max_retries}): {e}")
                
                if "413" in str(e) and i < self.max_retries - 1:
                    chunk_size = int(chunk_size * 0.5)
                    messages = self._reduce_prompt_size(original_messages)
                    continue
                    
                if i == self.max_retries - 1:
                    # 最后一次尝试，使用最小的配置
                    try:
                        minimal_message = {
                            "role": "user",
                            "content": "请总结上述内容的要点。"
                        }
                        response = self.client.chat.completions.create(
                            model=self.model,
                            messages=[minimal_message],
                            temperature=0.3
                        )
                        return response.choices[0].message.content.strip()
                    except Exception as final_e:
                        logger.error(f"最终尝试也失败: {final_e}")
                        
        # 如果所有尝试都失败，返回错误信息
        error_message = f"无法完成API调用，最后的错误: {last_error}"
        logger.error(error_message)
        return error_message  # 返回错误信息而不是 None

    def create_messages(self, segments: List[TranscriptSegment], **kwargs):
        """Build the chat message list for a batch of transcript segments.

        Recognized kwargs: title, tags, _format, style, extras, video_img_urls.
        The screenshot/link flags on self extend the requested output format.

        Returns a single-element list with one user message whose content is a
        list of parts (text, optionally image_url entries).
        """
        # Copy the caller's format list so we never mutate it in place (the old
        # code appended directly to e.g. source._format), and tolerate None.
        _format = list(kwargs.get('_format') or [])
        if self.screenshot and 'screenshot' not in _format:
            _format.append('screenshot')
            logger.info("UniversalGPT: 启用截图功能，添加screenshot到_format")

        if self.link and 'link' not in _format:
            _format.append('link')
            logger.info("UniversalGPT: 启用链接功能，添加link到_format")

        # Presumably refreshes the prompt configuration cache; the return
        # value was never used, so call it only for that side effect.
        get_prompts()
        logger.info(f"UniversalGPT: 已获取最新提示词配置，准备生成提示词")

        content_text = generate_base_prompt(
            title=kwargs.get('title'),
            segment_text=self._build_segment_text(segments),
            tags=kwargs.get('tags'),
            _format=_format,  # the (possibly extended) copy
            style=kwargs.get('style'),
            extras=kwargs.get('extras'),
        )

        logger.info(f"UniversalGPT: 生成提示词完成，长度: {len(content_text)} 字符")
        logger.info(f"UniversalGPT: 提示词格式: {_format}, 风格: {kwargs.get('style')}")

        content = [{"type": "text", "text": content_text}]
        video_img_urls = kwargs.get('video_img_urls', [])

        # Attach screenshots only for small batches to keep the request light.
        if len(segments) < 10 and video_img_urls:
            logger.info(f"UniversalGPT: 添加 {len(video_img_urls)} 个图片URL到提示词")
            for url in video_img_urls:
                content.append({
                    "type": "image_url",
                    "image_url": {
                        "url": url,
                        "detail": "auto"
                    }
                })

        messages = [{
            "role": "user",
            "content": content
        }]

        return messages

    def summarize(self, source: GPTSource) -> str:
        """Generate notes for *source*: small transcripts in one request, large
        ones chunked, summarized per-chunk, then merged by a final call."""
        logger.info(f"UniversalGPT: 开始生成笔记，标题: {source.title}, 模型: {self.model}")
        self.screenshot = source.screenshot
        self.link = source.link
        source.segment = self.ensure_segments_type(source.segment)

        # Keyword arguments shared by every create_messages call below.
        shared_kwargs = dict(
            tags=source.tags,
            _format=source._format,
            style=source.style,
            extras=source.extras,
        )

        # Small transcripts: one direct request.
        if len(source.segment) < 20:
            logger.info("UniversalGPT: 文本较小，直接处理")
            messages = self.create_messages(
                source.segment,
                title=source.title,
                video_img_urls=source.video_img_urls,
                **shared_kwargs,
            )
            return self._try_api_call(messages, self.max_tokens_per_chunk)

        # Large transcripts: split into chunks and summarize each.
        logger.info(f"UniversalGPT: 文本较大，分块处理，共 {len(source.segment)} 个片段")
        chunks = self._split_segments(source.segment, self.max_tokens_per_chunk)
        logger.info(f"UniversalGPT: 分为 {len(chunks)} 个块进行处理")

        summaries = []
        total = len(chunks)
        for index, chunk in enumerate(chunks):
            logger.info(f"UniversalGPT: 处理第 {index+1}/{total} 块，包含 {len(chunk)} 个片段")
            messages = self.create_messages(
                chunk,
                title=f"{source.title} (Part {index+1}/{total})",
                video_img_urls=[],  # no screenshots while chunking
                **shared_kwargs,
            )
            summaries.append(self._try_api_call(messages, self.max_tokens_per_chunk))

        # A single chunk needs no merge pass.
        if len(summaries) == 1:
            logger.info("UniversalGPT: 只有一个块，直接返回结果")
            return summaries[0]

        # Stitch the per-chunk summaries together for the merge request.
        logger.info("UniversalGPT: 合并多个块的摘要")
        combined_summary = "\n\n".join(
            f"## 第 {index+1} 部分\n{text}"
            for index, text in enumerate(summaries)
        )

        # Final pass: ask the model to merge the partial notes (simplified prompt).
        logger.info("UniversalGPT: 生成最终总结")
        final_messages = [{
            "role": "user",
            "content": f"""请整合以下笔记，保持重要信息和结构，去除重复：

{combined_summary}

要求：保持时间标记，保持重要信息，合并相似内容，保持结构清晰。
"""
        }]

        result = self._try_api_call(final_messages, self.max_tokens_per_chunk)
        logger.info("UniversalGPT: 笔记生成完成")
        return result
