import re
import os
import json
import hashlib
import logging
from llm_interface import LLMInterface
from config import TEXT_CACHE_DIR
from prompts import SHORT_TEXT_PROMPT, CHUNK_TEXT_PROMPT

# Module-level logger, namespaced to this module per the stdlib logging convention.
logger = logging.getLogger(__name__)

class TextProcessor:
    """Formats raw video-transcript text into titled Markdown via an LLM.

    Results are cached per video file (keyed by the video's SHA-256 digest)
    so repeated runs on an unchanged video skip the LLM round-trip. Texts
    longer than ``max_chunk_size`` characters are split on sentence
    boundaries and processed chunk by chunk. Numerous intermediate files
    are written to the cache and debug directories for offline inspection.
    """

    def __init__(self):
        self.llm_interface = LLMInterface()
        # Maximum number of characters sent to the LLM in a single request.
        self.max_chunk_size = 2000
        self.cache_dir = TEXT_CACHE_DIR
        self._ensure_cache_dir()

    def _ensure_cache_dir(self):
        """Create the cache directory if it does not exist yet."""
        if not os.path.exists(self.cache_dir):
            # exist_ok=True guards against a race where another process
            # creates the directory between the check and makedirs.
            os.makedirs(self.cache_dir, exist_ok=True)
            logger.info(f"创建缓存目录: {self.cache_dir}")

    def _get_debug_dir(self):
        """Return (creating if needed) the shared debug output directory.

        The directory lives next to this package: <parent-of-parent>/debug.
        """
        debug_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "debug")
        os.makedirs(debug_dir, exist_ok=True)
        return debug_dir

    @staticmethod
    def _write_text(path, content):
        """Write *content* to *path* as UTF-8 text, replacing any existing file."""
        with open(path, "w", encoding="utf-8") as f:
            f.write(content)

    def _get_video_hash(self, video_path):
        """Return the SHA-256 hex digest of the video file's bytes."""
        sha256_hash = hashlib.sha256()
        with open(video_path, "rb") as f:
            # Read in 4 KiB blocks so arbitrarily large videos never load fully.
            for byte_block in iter(lambda: f.read(4096), b""):
                sha256_hash.update(byte_block)
        return sha256_hash.hexdigest()

    def _get_cache_path(self, video_path):
        """Return the JSON cache entry path for *video_path*."""
        video_hash = self._get_video_hash(video_path)
        return os.path.join(self.cache_dir, f"{video_hash}.json")

    def _save_to_cache(self, video_path, processed_text):
        """Persist *processed_text* (plus a plain .txt copy) to the cache.

        Raises:
            Exception: any write failure is logged and re-raised unchanged.
        """
        try:
            cache_path = self._get_cache_path(video_path)
            logger.debug(f"保存缓存 - 视频路径: {video_path}")
            logger.debug(f"保存缓存 - 缓存路径: {cache_path}")

            # Also keep a plain-text copy named after the video, next to
            # the JSON cache entry.
            txt_filename = os.path.splitext(os.path.basename(video_path))[0] + ".txt"
            txt_path = os.path.join(self.cache_dir, txt_filename)
            logger.debug(f"保存原始文本文件: {txt_path}")
            self._write_text(txt_path, processed_text)

            cache_data = {
                "video_path": video_path,
                "processed_text": processed_text,
                "raw_text_file": txt_filename,  # name of the plain-text copy
                # The video's mtime lets _load_from_cache invalidate the
                # entry when the file is later modified.
                "timestamp": os.path.getmtime(video_path),
            }

            with open(cache_path, "w", encoding="utf-8") as f:
                json.dump(cache_data, f, ensure_ascii=False, indent=2)
            logger.info("缓存保存成功")
        except Exception as e:
            logger.error(f"保存缓存时出错: {str(e)}", exc_info=True)
            raise

    def _load_from_cache(self, video_path):
        """Return the cached processed text, or None on miss, staleness, or error."""
        cache_path = self._get_cache_path(video_path)
        if not os.path.exists(cache_path):
            logger.debug(f"缓存文件不存在: {cache_path}")
            return None

        try:
            with open(cache_path, "r", encoding="utf-8") as f:
                cache_data = json.load(f)
            # The entry is only valid while the video file is unmodified.
            if cache_data["timestamp"] == os.path.getmtime(video_path):
                logger.info("从缓存加载处理后的文本")
                return cache_data["processed_text"]
            logger.debug("视频文件已修改，缓存无效")
        except (OSError, json.JSONDecodeError, KeyError, TypeError) as e:
            # A corrupt or partial cache entry is treated as a cache miss.
            logger.error(f"读取缓存时出错: {str(e)}", exc_info=True)
        return None

    def process_text(self, text, video_path=None):
        """Format *text* into titled Markdown, using the cache when possible.

        Args:
            text: Raw transcript text.
            video_path: Optional source video path. When given and the file
                exists, enables cache lookup/storage and an extra debug dump
                named after the video file.

        Returns:
            The formatted Markdown text.
        """
        logger.info("开始处理文本...")
        logger.debug(f"输入文本长度: {len(text)}")

        # Dump the raw input before anything else so it survives failures.
        self._dump_raw_input(text, video_path)

        # Serve from cache when the video exists and is unchanged.
        if video_path and os.path.exists(video_path):
            cached_text = self._load_from_cache(video_path)
            if cached_text:
                logger.debug(f"从缓存加载的文本长度: {len(cached_text)}")
                return cached_text

        if len(text) <= self.max_chunk_size:
            logger.info("处理短文本")
            processed_text = self._process_short_text(text)
        else:
            processed_text = self._process_long_text(text)

        logger.debug(f"处理后的文本长度: {len(processed_text)}")

        if video_path and os.path.exists(video_path):
            self._save_to_cache(video_path, processed_text)

        logger.info("文本处理完成")
        return processed_text

    def _dump_raw_input(self, text, video_path):
        """Save the raw input text to the cache and debug directories."""
        debug_dir = self._get_debug_dir()

        # Written to both locations so the dump is findable either way.
        raw_input_file1 = os.path.join(self.cache_dir, "raw_input.txt")
        raw_input_file2 = os.path.join(debug_dir, "raw_input.txt")
        self._write_text(raw_input_file1, text)
        self._write_text(raw_input_file2, text)
        logger.info(f"已保存原始输入文本到: {raw_input_file1} 和 {raw_input_file2}")

        if video_path:
            # Extra copy named after the video, so concurrent runs on
            # different videos do not overwrite each other's dump.
            raw_input_file = os.path.join(self.cache_dir, f"{os.path.basename(video_path)}_raw_input.txt")
            self._write_text(raw_input_file, text)
            logger.info(f"已保存原始输入文本到: {raw_input_file}")

    def _process_long_text(self, text):
        """Split a long text into chunks, process each, and merge the results."""
        chunks = self._split_into_chunks(text)
        logger.info(f"文本已分割为 {len(chunks)} 个部分")

        processed_chunks = []
        for i, chunk in enumerate(chunks, 1):
            logger.info(f"正在处理第 {i} 个部分...")
            logger.debug(f"第 {i} 部分长度: {len(chunk)}")
            processed_chunks.append(self._process_chunk(chunk))

        processed_text = "".join(processed_chunks)

        # Keep a merged copy on disk for debugging.
        merged_file = os.path.join(self.cache_dir, "merged_result.md")
        self._write_text(merged_file, processed_text)
        logger.info(f"已保存合并后的处理结果到: {merged_file}")

        return processed_text

    def _process_short_text(self, text):
        """Format a short text (one LLM call, title generation only).

        Falls back to a generic H1 title plus the original text if the LLM
        call fails. The returned text always starts with a Markdown H1.
        """
        debug_dir = self._get_debug_dir()

        # Dump the original content before calling out to the LLM.
        original_file = os.path.join(debug_dir, "short_text_original.txt")
        self._write_text(original_file, text)
        logger.debug(f"已保存原始短文本到: {original_file}")

        prompt = SHORT_TEXT_PROMPT.format(text=text)
        try:
            logger.debug("正在处理短文本...")
            # Dump the full prompt for debugging.
            debug_file = os.path.join(debug_dir, "short_text_prompt.txt")
            self._write_text(debug_file, prompt)
            logger.info(f"已保存短文本处理提示词到: {debug_file}")

            formatted_text = self.llm_interface.generate_text(prompt)

            # Dump the raw LLM response for debugging.
            response_file = os.path.join(debug_dir, "short_text_response.txt")
            self._write_text(response_file, formatted_text)
            logger.info(f"已保存LLM响应到: {response_file}")

            formatted_text = formatted_text.strip()
            # Guarantee the result opens with a Markdown H1 title.
            if not formatted_text.startswith('# '):
                formatted_text = "# " + formatted_text
            logger.debug("短文本处理完成")
            return formatted_text
        except Exception as e:
            logger.error(f"处理短文本时出错: {str(e)}", exc_info=True)
            return f"# 视频转写文本\n\n{text}"

    def _split_into_chunks(self, text):
        """Split *text* into sentence-aligned chunks of at most max_chunk_size.

        Splits on Chinese sentence-ending punctuation (。！？), keeping each
        delimiter attached to its sentence. A single sentence longer than
        max_chunk_size becomes its own (oversized) chunk.
        """
        # A capture group makes re.split keep the delimiters:
        # [sentence, delim, sentence, delim, ..., trailing-text].
        sentences = re.split(r'([。！？])', text)
        chunks = []
        current_chunk = ""

        # Track lengths so we can warn if splitting ever drops content.
        total_length = len(text)
        processed_length = 0

        for i in range(0, len(sentences), 2):
            # Re-attach the trailing delimiter (if any) to its sentence.
            sentence = sentences[i] + (sentences[i+1] if i+1 < len(sentences) else "")
            if len(current_chunk) + len(sentence) > self.max_chunk_size:
                if current_chunk:
                    chunks.append(current_chunk)
                    processed_length += len(current_chunk)
                current_chunk = sentence
            else:
                current_chunk += sentence

        if current_chunk:
            chunks.append(current_chunk)
            processed_length += len(current_chunk)

        if processed_length != total_length:
            logger.warning(f"文本分割可能丢失内容: 原始长度={total_length}, 处理后长度={processed_length}")

        # Persist each chunk for offline inspection.
        for i, chunk in enumerate(chunks):
            chunk_file = os.path.join(self.cache_dir, f"split_chunk_{i+1}.txt")
            self._write_text(chunk_file, chunk)
            logger.debug(f"已保存分割后的文本块 {i+1}/{len(chunks)} 到: {chunk_file}, 长度: {len(chunk)}")

        return chunks

    def _process_chunk(self, text):
        """Format one text chunk via the LLM; fall back to raw text on error."""
        debug_dir = self._get_debug_dir()

        # Short content-derived id so each chunk's debug files are distinct.
        chunk_id = hashlib.md5(text[:100].encode()).hexdigest()[:8]

        # Dump the original chunk before calling out to the LLM.
        chunk_file = os.path.join(debug_dir, f"chunk_{chunk_id}_original.txt")
        self._write_text(chunk_file, text)
        logger.debug(f"已保存原始文本块到: {chunk_file}")

        prompt = CHUNK_TEXT_PROMPT.format(text=text)
        try:
            logger.debug("正在处理文本块...")

            # Dump the full prompt for debugging.
            debug_file = os.path.join(debug_dir, f"chunk_{chunk_id}_prompt.txt")
            self._write_text(debug_file, prompt)
            logger.info(f"已保存文本块处理提示词到: {debug_file}")

            formatted_text = self.llm_interface.generate_text(prompt)

            # Dump the raw LLM response for debugging.
            response_file = os.path.join(debug_dir, f"chunk_{chunk_id}_response.txt")
            self._write_text(response_file, formatted_text)
            logger.info(f"已保存LLM响应到: {response_file}")

            formatted_text = formatted_text.strip()
            logger.debug("文本块处理完成")
            return formatted_text
        except Exception as e:
            logger.error(f"处理文本块时出错: {str(e)}", exc_info=True)
            return f"## 未处理文本\n\n{text}"
