from datetime import datetime
import json
import os
import re
import shutil
import subprocess
import sys

import unicodedata

from openai import OpenAI
from src.database.dao.upload_file_dao import UploadedFileDAO
from src.database.dao.video_dao import VideoDAO
from src.database.dao_factory import DAOFactory
from src.database.manager import DBManager
from src.modules.video_merger import AdvancedVideoMerger as VideoMerger
from src.utils.custom_synonyms import CUSTOM_SYNONYMS
from src.utils.logger import log_error, log_info
import random
import sqlite3
import numpy as np
import faiss
import itertools



class Util:
    # Process-wide cache of lazily created singletons (e.g. 'openai_client',
    # 'startupinfo'), keyed by name. Shared by all static methods below.
    _instances = {}
    @staticmethod
    def expand_keywords(word):
        """Return the list of custom synonyms registered for *word*, or [word] when none exist."""
        return CUSTOM_SYNONYMS.get(word, [word])

    @staticmethod
    def get_startupinfo():
        """获取平台相关的启动信息"""
        if 'startupinfo' not in Util._instances:
            if sys.platform == "win32":
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            else:
                startupinfo = None
            Util._instances['startupinfo'] = startupinfo
        return Util._instances['startupinfo']
    
    @staticmethod
    def extract_subtitle(video_path):
        """
        提取视频的音频并使用 Whisper 模型提取字幕信息
        增强错误处理和进度报告
        :param video_path: 视频文件路径
        :return: 字幕文件路径
        """
        if not os.path.exists(video_path):
            raise FileNotFoundError(f"视频文件不存在: {video_path}")

        client = Util.get_openai_client()
        startupinfo = Util.get_startupinfo()

        # 创建临时文件用于存储提取的音频
        audio_path = video_path.replace(".mp4", ".mp3")
        srt_file_path = video_path.replace(".mp4", ".srt")

        try:
            log_info(f"开始提取音频: {video_path}")
            # 使用 ffmpeg 提取音频

            cmd = ["ffmpeg", "-i", video_path, "-q:a", "0", "-map", "a", audio_path, "-y"]
            subprocess.run(cmd, startupinfo=startupinfo, stderr=subprocess.PIPE, stdout=subprocess.PIPE, encoding='utf-8')

            log_info(f"开始转录字幕: {audio_path}")
            # 加载 Whisper 模型进行转录
            with open(audio_path, "rb") as audio_file:
                response = client.audio.transcriptions.create(
                    model="whisper-1",
                    file=audio_file,
                    response_format="srt")

            # 将字幕保存到 .srt 文件
            with open(srt_file_path, "w", encoding="utf-8") as srt_file:
                srt_file.write(response)

            log_info(f"字幕提取成功: {srt_file_path}")
            return srt_file_path

        except Exception as e:
            log_info(f"字幕提取失败: {e}")
            log_error(f"字幕提取错误: {e}")
            raise RuntimeError(f"音频提取或转录失败: {e}")

        finally:
            # 删除临时音频文件
            try:
                if os.path.exists(audio_path):
                    os.remove(audio_path)
            except Exception as e:
                log_info(f"删除临时音频文件时出错: {e}")
                log_error(f"删除临时音频文件时出错: {e}")

    @staticmethod
    def get_openai_client(api_key=None):
        """
        获取OpenAI API客户端的单例实例
        :param api_key: OpenAI API密钥，如果为None则尝试从环境变量获取
        :return: OpenAI客户端实例
        """
        if 'openai_client' not in Util._instances:
            # 如果未提供API密钥，尝试从环境变量获取
            if not api_key:
                api_key = os.environ.get('OPENAI_API_KEY')
                
            if not api_key:
                raise ValueError("OpenAI API密钥未提供，请作为参数传入或设置OPENAI_API_KEY环境变量")
                
            Util._instances['openai_client'] = OpenAI(api_key=api_key)
            
        return Util._instances['openai_client']

    @staticmethod
    def remove_vietnamese_accents(text: str) -> str:
        """
        仅对拉丁字符（包括越南语）去除重音，不影响如泰语等非拉丁语系字符。
        """
        result = []
        for char in unicodedata.normalize('NFD', text):
            # 只对拉丁字符分解后的 Mn 进行过滤
            if unicodedata.category(char) == 'Mn':
                # 检查上一个字符是否是拉丁字母，如果是才去掉当前 Mn
                if result and 'LATIN' in unicodedata.name(result[-1], ''):
                    continue
            result.append(char)

        text = ''.join(result)
        # 特例处理越南语字母 đ/Đ
        text = text.replace('đ', 'd').replace('Đ', 'D').replace(" ", "")
        return text
    
    @staticmethod
    def translate_video_name(text, target_language="VN"):
        """
        Translate a video title with ChatGPT, handling the title text and its
        #hashtags separately.

        :param text: text to translate (a title, optionally followed by #tags)
        :param target_language: target language code ("VN", "TH", "EN", "ID",
            "MS"), or "ASCII" to merely strip non-ASCII characters without
            translating
        :return: sanitized translated text; None when the model's answer still
            contains Chinese characters; on API failure, a sanitized version
            of the original text (or a timestamp-based fallback name)
        """
        log_info(f"正在翻译: {text}")

        # "ASCII" mode: keep printable ASCII characters only, no translation.
        if target_language == "ASCII":
            sanitized_text = ''.join(c for c in text if c.isascii() and c.isprintable() or c == ' ')
            safe_text = re.sub(r'[\\/:*?"<>|]', '_', sanitized_text)  # replace characters forbidden in Windows filenames
            log_info(f"已转换为ASCII: {safe_text}")
            return safe_text

        # Language map supplying explicit language names for the prompt.
        language_map = {
            "VN": "Vietnamese",
            "TH": "Thai",
            "EN": "English",
            "ID": "Indonesian",
            "MS": "Malay",
            "ASCII": "English but only using ASCII characters"
        }

        full_language = language_map.get(target_language, target_language)
        
        # Split the text into a plain title and its #hashtags.
        tags = re.findall(r'#\w+', text)
        title = text
        for tag in tags:
            title = title.replace(tag, '').strip()
        title = re.sub(r'\s+', ' ', title).strip()
        
        # Strip the leading '#' from each tag.
        # NOTE(review): only tags of <= 5 chars including the '#' survive the
        # filter below — longer hashtags are silently dropped; confirm intended.
        tag_values = [tag[1:] for tag in tags if len(tag) <= 5]
        
        # Build the JSON payload sent to the model.
        content_json = {
            "title": title,
            "tags": tag_values
        }
        
        log_info(f"待翻译内容解析：{content_json}")

        messages = [
                {
                    "role": "system",
                    "content": (
                        "你是一个专业的翻译助手。你的任务是准确翻译JSON数据中的内容，输出结构必须与输入完全一致，不能添加多余内容。"
                        "翻译后的文本绝对不能包含以下字符：\\ / : * ? \" < > |。"
                    )
                },
                {
                    "role": "user",
                    "content": f"""请将以下JSON中的内容翻译成{full_language}，并保持原有JSON结构返回：

            {json.dumps(content_json, ensure_ascii=False)}

            翻译要求：
            1. "title"字段请完整、准确地翻译原句；
            2. "tags"字段请对每个标签进行含义提炼与简洁翻译，使用常见、通用的短语表达，适合作为话题标签；
            3. 按照以下格式返回结果：{{ title: "翻译后的标题", tags: ["标签1", "标签2"] }}
            4. 结果不能包含任何中文字符或其他非ASCII字符；
            """
                }
            ]


        try:
            response = Util.get_openai_client().chat.completions.create(
                model="gpt-3.5-turbo",
                messages=messages,
                temperature=0.5,
                timeout=60
            )
            translated_content = response.choices[0].message.content.strip()
            log_info(f"翻译结果: {translated_content}")
            # Try to parse the model's answer as JSON.
            try:
                # Extract the JSON object embedded in the reply, if any.
                json_match = re.search(r'{[\s\S]*}', translated_content)
                if json_match:
                    translated_content = json_match.group(0)
                
                translated_json = json.loads(translated_content)
                translated_title = translated_json.get("title", "")
                translated_tags = translated_json.get("tags", [])
                
                # Reassemble: translated title followed by de-accented hashtags.
                final_text = translated_title
                if translated_tags:
                    hashtags = " ".join([f"#{Util.remove_vietnamese_accents(tag)}" for tag in translated_tags])
                    final_text = f"{final_text} {hashtags}"
                    
                log_info(f"翻译完成: {final_text}")
                
                # Replace characters Windows does not allow in filenames.
                sanitized_text = re.sub(r'[\\/:*?"<>|]', '_', final_text)
                
                # If Chinese characters survived, the translation is suspect.
                has_chinese = any('\u4e00' <= char <= '\u9fff' for char in sanitized_text)
                if has_chinese:
                    log_info("翻译结果中包含中文字符，可能是翻译不准确，暂时不处理")
                    return None
                if sanitized_text != final_text:
                    log_info(f"已修正文件名中的非法字符: {sanitized_text}")

                return sanitized_text
                
            except json.JSONDecodeError as e:
                log_error(f"解析JSON响应失败: {e}, 响应内容: {translated_content}")
                # Fall back to the raw model reply, sanitized.
                sanitized_text = re.sub(r'[\\/:*?"<>|]', '_', translated_content)
                return sanitized_text

        except Exception as e:
            error_msg = f"调用 ChatGPT 翻译功能时出错: {e}"
            log_info(error_msg)
            log_error(error_msg)
            # On API failure, return a safe version of the original name.
            safe_name = re.sub(r'[\\/:*?"<>|]', '_', text)
            safe_name = ''.join(c for c in safe_name if c.isascii() and c.isprintable() or c == ' ')
            return safe_name or "file_" + datetime.now().strftime("%Y%m%d%H%M%S")

    @staticmethod
    def merge_videos(*video_paths,
                     output_path=None,
                     remove_voice=False,
                     transition_effect=None,
                     transition_duration=1.0,
                     audio_ids=None,
                     logger=None,
                     ):
        """
        Stably merge multiple videos (the merger converts to .ts first, then
        concatenates, avoiding frame-rate and timestamp glitches).

        :param video_paths: paths of the videos to merge
        :param output_path: output video path; auto-generated when None
        :param remove_voice: whether to strip the vocal track
        :param transition_effect: transition effect type; None means no transition
        :param transition_duration: transition duration in seconds
        :param audio_ids: optional ids of audio tracks to overlay; one output
            video is produced per audio id
        :param logger: optional logger forwarded to the merger
        :return: list of {"video": path, "audio": path-or-None} dicts,
            or [] when the merge failed
        """
        # Normalize here instead of using a mutable default argument, which
        # would be shared across calls.
        audio_ids = list(audio_ids) if audio_ids else []

        merger = VideoMerger(startupinfo=Util.get_startupinfo(),
                             transition_duration=transition_duration,
                             transition_effect=transition_effect,
                             logger=logger
                             )
        merge_success = merger.merge_videos_with_transitions(
            *video_paths,
            output_path=output_path,
            remove_voice=remove_voice,
        )
        if not merge_success:
            return []

        output_paths = []
        if audio_ids:
            file_dao = DAOFactory.get_instance().get_uploaded_file_dao()
            # One derived output file per overlay audio, numbered from 1.
            for index, audio_id in enumerate(audio_ids, start=1):
                audio = file_dao.get_audio_by_id(audio_id)
                audio_path = audio.audio_path
                subtitle_path = audio.subtitle_path
                log_info(f"正在处理音频: {audio_id}, 路径: {audio_path}, 字幕路径: {subtitle_path}")
                audio_output_path = output_path.replace(".mp4", "_merged_audio_" + str(index) + ".mp4")
                log_info(f"正在合并音频: {audio_id} 到视频: {audio_output_path}")
                merger.merge_video_with_audio(video_path=output_path,
                                              audio_path=audio_path,
                                              subtitle_path=subtitle_path,
                                              output_path=audio_output_path,
                                              )
                output_paths.append({"video": audio_output_path, "audio": audio_path})
        else:
            output_paths.append({"video": output_path, "audio": None})
        return output_paths
        

    @staticmethod
    def build_description_faiss_index(db_path, embedding_dimension=1536, batch_size=100):
        """
        从SQLite数据库读取videos表的description字段，建立FAISS索引
        
        :param db_path: SQLite数据库路径
        :param embedding_dimension: 嵌入向量维度，默认1536（适用于OpenAI text-embedding-ada-002模型）
        :param batch_size: 批处理大小，控制内存使用
        :return: 包含FAISS索引、描述文本和ID映射的字典
        """
        
        if not os.path.exists(db_path):
            raise FileNotFoundError(f"数据库文件不存在: {db_path}")
        
        # 连接到SQLite数据库
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        
        # 检查videos表是否存在description字段
        cursor.execute("PRAGMA table_info(videos)")
        columns = cursor.fetchall()
        if not any(column[1] == 'description' for column in columns):
            conn.close()
            raise ValueError("videos表中不存在description字段")
        
        # 查询videos表获取所有有效的description
        cursor.execute("SELECT id, description FROM videos WHERE description IS NOT NULL AND description != ''")
        results = cursor.fetchall()
        conn.close()
        
        if not results:
            raise ValueError("没有找到有效的description数据")
        
        # 提取ID和描述
        ids = [row[0] for row in results]
        descriptions = [row[1] for row in results]
        
        log_info(f"从数据库读取了 {len(descriptions)} 条描述文本")
        
        # 获取OpenAI客户端用于生成嵌入
        client = Util.get_openai_client()
        
        # 分批获取嵌入以避免API限制和内存问题
        embeddings = []
        total_batches = (len(descriptions) + batch_size - 1) // batch_size
        
        for i in range(0, len(descriptions), batch_size):
            batch = descriptions[i:i+batch_size]
            log_info(f"正在处理批次 {i//batch_size + 1}/{total_batches} ({len(batch)} 条描述)")
            
            try:
                # 获取这批描述的嵌入
                response = client.embeddings.create(
                    model="text-embedding-ada-002",
                    input=batch,
                    encoding_format="float"
                )
                
                batch_embeddings = [item.embedding for item in response.data]
                embeddings.extend(batch_embeddings)
                log_info(f"成功获取批次 {i//batch_size + 1} 的嵌入向量")
                
            except Exception as e:
                log_error(f"获取嵌入向量时发生错误: {e}")
                raise RuntimeError(f"无法获取描述的嵌入向量: {e}")
        
        # 创建嵌入向量的numpy数组
        embedding_array = np.array(embeddings, dtype=np.float32)
        
        # 创建FAISS索引
        index = faiss.IndexFlatL2(embedding_dimension)
        index.add(embedding_array)
        
        log_info(f"成功创建FAISS索引，包含 {index.ntotal} 个向量")
        
        return {
            "index": index,
            "texts": descriptions,
            "ids": ids,
            "embedding_array": embedding_array
        }

    @staticmethod
    def search_faiss_index(index_data, query_text, top_k=5):
        """
        在FAISS索引中搜索与查询文本最相似的条目
        
        :param index_data: 从build_description_faiss_index返回的索引数据
        :param query_text: 查询文本
        :param top_k: 返回的最相似结果数量
        :return: 包含最相似文本、分数和ID的列表
        """
        
        if not index_data or "index" not in index_data:
            raise ValueError("无效的索引数据")
        
        # 获取查询文本的嵌入向量
        client = Util.get_openai_client()
        try:
            response = client.embeddings.create(
                model="text-embedding-ada-002",
                input=[query_text],
                encoding_format="float"
            )
            query_embedding = np.array([response.data[0].embedding], dtype=np.float32)
        except Exception as e:
            log_error(f"获取查询文本嵌入向量时发生错误: {e}")
            raise RuntimeError(f"无法获取查询文本的嵌入向量: {e}")
        
        # 在索引中搜索最相似的向量
        distances, indices = index_data["index"].search(query_embedding, min(top_k, len(index_data["texts"])))
        
        # 整理结果
        results = []
        for i, idx in enumerate(indices[0]):
            if idx < len(index_data["texts"]):
                results.append({
                    "id": index_data["ids"][idx],
                    "text": index_data["texts"][idx],
                    "score": float(distances[0][i])
                })
        
        return results

    @staticmethod
    def init_db():
        """Initialize the application database by delegating to DBManager().init_db()."""
        DBManager().init_db()
        
    @staticmethod
    def move_video_file(source_path, target_path, log_callback=log_info):
        """
        Move a video file on disk and update its stored path in the database.

        Args:
            source_path: current file path
            target_path: destination file path
            log_callback: logging callback; pass None to disable logging

        Returns:
            tuple: (success flag, resulting file path)

        Raises:
            RuntimeError: if the move or the database update raises
        """
        # A missing source is reported but treated as a soft failure.
        if not os.path.exists(source_path):
            if log_callback:
                log_callback(f"源文件不存在: {source_path}")
            return False, source_path

        # Ensure the destination directory exists; dirname may be "" when the
        # target is a bare filename, and makedirs("") would raise.
        target_dir = os.path.dirname(target_path)
        if target_dir:
            os.makedirs(target_dir, exist_ok=True)

        try:
            shutil.move(source_path, target_path)

            # Keep the database row pointing at the file's new location.
            success = DAOFactory.get_instance().get_video_dao().update_local_path(source_path, target_path)
            if log_callback:
                log_callback(f"文件已移动: {source_path} → {target_path}")
                if success:
                    # Previously this call skipped the None guard and crashed
                    # when log_callback=None; always guard it.
                    log_callback(f"数据库更新成功: {source_path} → {target_path}")

            return True, target_path
        except Exception as e:
            raise RuntimeError(f"无法移动文件: {e}") from e


    @staticmethod
    def export_copywritings_to_excel(data_list, output_path=None):
        """
        Export copywriting data to an Excel workbook.

        :param data_list: list of dicts, each holding video_name and copywriting
        :param output_path: destination path; auto-generated temp file when None
        :return: dict with "success", "message" and, on success, "file_path"
        """
        try:
            # pandas/openpyxl are imported lazily so merely importing this
            # module does not require them; os and the loggers are already
            # module-level imports, so the old local re-imports were removed.
            import pandas as pd
            from openpyxl.styles import Font, Alignment

            log_info("开始导出Excel...")

            df = pd.DataFrame(data_list)

            # Without an explicit destination, write to a managed temp file.
            if output_path is None:
                from src.modules.file_manager import FileManager
                file_manager = FileManager()
                temp_file = file_manager.create_temp_file(
                    prefix="copywriting_export_",
                    suffix=".xlsx"
                )
                output_path = temp_file.path

            # Make sure the output directory exists (dirname may be "").
            output_dir = os.path.dirname(output_path)
            if output_dir:
                os.makedirs(output_dir, exist_ok=True)

            with pd.ExcelWriter(output_path, engine='openpyxl') as writer:
                df.to_excel(writer, index=False, sheet_name="文案")

                worksheet = writer.sheets["文案"]

                # Column A holds the video name, column B the copywriting body.
                worksheet.column_dimensions['A'].width = 30
                worksheet.column_dimensions['B'].width = 80

                # Bold, centered header row.
                header_font = Font(bold=True, size=12)
                header_alignment = Alignment(horizontal='center', vertical='center', wrap_text=True)
                for cell in worksheet[1]:
                    cell.font = header_font
                    cell.alignment = header_alignment

                # Wrap the copywriting cells and give their rows extra height.
                content_alignment = Alignment(vertical='top', wrap_text=True)
                for row in range(2, len(data_list) + 2):
                    worksheet.cell(row=row, column=2).alignment = content_alignment
                    worksheet.row_dimensions[row].height = 120

            log_info(f"Excel导出成功: {output_path}")

            return {
                "success": True,
                "message": "Excel导出成功",
                "file_path": output_path
            }

        except Exception as e:
            error_message = f"Excel导出失败: {str(e)}"
            log_error(error_message)

            return {
                "success": False,
                "message": error_message
            }

    @staticmethod
    def get_custom_files( folder_path):
        results = []
        for path in [p for p in os.listdir(folder_path) if p.lower().endswith('.mp4')]:
            file_path = os.path.join(folder_path, path)
            file_info = {
                "video_id": file_path,
                "description": os.path.basename(file_path),
                "category": "自定义",
                "score": 100,
                "local_path": file_path,
                "downloaded_at": 0,
                "duration": 0,  # 默认时长为0
                "username": "自定义",
            }
            results.append(file_info)
        return results
