import re
import json
import os
import logging
import time
import threading
from src.ui.i18n import _
from concurrent.futures import ThreadPoolExecutor, as_completed, wait, FIRST_COMPLETED, CancelledError
from openai import OpenAI, APIConnectionError, RateLimitError, APIStatusError, AuthenticationError
from .base_translator import BaseTranslator
from src.config import app_config
from src.utils.translation_cache import TranslationCache

logger = logging.getLogger(__name__)


class TranslationError(Exception):
    """Raised when a translation task fails.

    Optionally carries a list of per-batch error detail strings that are
    appended to the string representation.
    """

    def __init__(self, message, errors=None):
        super().__init__(message)
        # Keep the caller's list when given; otherwise use a fresh empty list.
        self.errors = errors or []

    def __str__(self):
        base_message = super().__str__()
        if not self.errors:
            return base_message

        error_details = "\n".join(f"  - {err}" for err in self.errors)
        return f"{base_message}\n\n{_('Detailed error information:')}\n{error_details}"


class OpenAITranslator(BaseTranslator):
    """Subtitle translator backed by OpenAI-compatible chat-completion APIs.

    Requests to each service endpoint are throttled by a class-level
    semaphore shared across instances, keyed by the endpoint's base_url.
    """

    # Maps base_url -> threading.Semaphore bounding concurrent in-flight requests.
    _semaphores = {}
    # Guards lazy creation of entries in _semaphores.
    _semaphore_lock = threading.Lock()

    def __init__(self):
        super().__init__()
        self.clients = {}  # Cache clients by base_url
        # Local cache of previous translations keyed by text/model/languages.
        self.cache = TranslationCache()

    def _get_client_for_model(self, model_config):
        """Return a cached or newly created OpenAI client for *model_config*.

        Clients are cached per (base_url, api_key) pair: caching by base_url
        alone would silently keep reusing a client authenticated with a stale
        key after the user changes the API key for a service.

        Args:
            model_config: Dict expected to contain 'base_url', 'api_key' and
                optionally 'display_name' (used in error messages).

        Returns:
            An OpenAI client instance configured for the endpoint.

        Raises:
            ValueError: if 'base_url' or 'api_key' is missing.
        """
        base_url = model_config.get("base_url")
        if not base_url:
            raise ValueError("base_url not found in model configuration.")

        # Validate the key before any cache lookup so a removed key is always
        # reported, even when a client for this endpoint was cached earlier.
        api_key = model_config.get("api_key")
        if not api_key:
            raise ValueError(
                f"API key for model '{model_config.get('display_name')}' is not configured.")

        cache_key = (base_url, api_key)
        if cache_key in self.clients:
            return self.clients[cache_key]

        try:
            client = OpenAI(api_key=api_key, base_url=base_url,
                            max_retries=3, timeout=60.0)
            self.clients[cache_key] = client
            logger.debug(
                f"Successfully created OpenAI client for '{base_url}'.")
            return client
        except Exception as e:
            logger.error(
                f"Failed to create OpenAI client for '{base_url}': {e}")
            raise

    @staticmethod
    def get_name():
        """Return the display name identifying this translator backend."""
        return "OpenAI"

    def segment_and_translate(self, texts, source_lang, target_lang, model_config, translation_style="常规", video_type="常规", stream_callback=None, is_cancelled=None):
        """Merge text fragments, translate them, and re-segment into sentences.

        The fragments are joined with the ``|||`` separator and sent in one
        request; the model is instructed to return translated sentences
        separated by ``|||`` again, which are then split back into a list.

        Args:
            texts: Text fragments to merge and translate.
            source_lang: Source language name; 'auto' is treated as English
                for this prompt (see below).
            target_lang: Target language name.
            model_config: Dict with at least 'base_url', 'api_key', 'model_name'.
            translation_style: Style hint injected into the prompt.
            video_type: Video-type hint injected into the prompt.
            stream_callback: Optional callable receiving progress/response text.
            is_cancelled: Optional callable returning True to abort.

        Returns:
            List of translated sentence strings; [] for empty input.

        Raises:
            TranslationError: if client setup fails or all retries fail.
            CancelledError: if is_cancelled() reports cancellation.
        """
        is_cancelled = is_cancelled or (lambda: False)
        if not texts:
            return []

        try:
            client = self._get_client_for_model(model_config)
            model_name = model_config.get("model_name")
        except (ValueError, KeyError) as e:
            logger.error(f"Failed to get OpenAI client or model config for segmentation: {e}")
            raise TranslationError(_("Segmentation failed: {}").format(e))

        # NOTE(review): 'auto' is mapped to English for the segmentation
        # prompt — confirm this assumption matches the dominant source content.
        source_lang_text = "English" if source_lang.lower() == 'auto' else source_lang

        system_prompt = (
            "You are a professional subtitle translator and editor. Your task is to translate and re-segment text for subtitles while adapting the tone and style to match the video context. Follow these rules precisely.\n\n"
            "**Task:**\n"
            "1. Input text fragments are separated by `|||`.\n"
            "2. Combine the fragments into a coherent text.\n"
            "3. Translate the combined text from {source_lang} to {target_lang}.\n"
            "4. Adapt the translation style to '{translation_style}' for a '{video_type}' type video.\n"
            "5. Split the translation into short, natural sentences.\n"
            "6. Return the sentences separated by `|||`.\n\n"
            "**Style Guidelines for '{translation_style}' in '{video_type}':**\n"
            "- Actively use this style and tone throughout the translation\n"
            "- Employ appropriate language, humor, or formality level for this video type\n"
            "- Retain the original meaning while making it engaging for the target audience\n"
            "- Do not add, invent, or infer content beyond what's needed for style adaptation\n\n"
            "**Output Requirements:**\n"
            "- Provide ONLY the translated and re-segmented text\n"
            "- Use `|||` as the separator between sentences\n"
            "- Do not include any explanations, notes, or markdown\n"
            "- Ensure smooth, natural flow while preserving the original message"
        ).format(source_lang=source_lang_text, target_lang=target_lang, translation_style=translation_style, video_type=video_type)

        user_prompt = "|||".join(texts)



        model_kwargs = {
            "model": model_name,
            "messages": [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
            "temperature": 0.1,
            "stream": False,
            "timeout": 120,
        }

        # Debug: Print the actual request being sent
        logger.debug(f"=== DEBUG REQUEST ===")
        logger.debug(f"Model: {model_name}")
        logger.debug(f"Video Type: {video_type}")
        logger.debug(f"Translation Style: {translation_style}")
        logger.debug(f"System Prompt:\n{system_prompt}")
        logger.debug(f"User Prompt:\n{user_prompt}")
        logger.debug(f"=== END DEBUG REQUEST ===")

        # Up to max_retries + 1 attempts; empty responses and exceptions both
        # trigger a short backoff before the next attempt.
        max_retries = 2
        for attempt in range(max_retries + 1):
            if is_cancelled():
                raise CancelledError()
            try:
                if stream_callback:
                    stream_callback(_("\n[Info] Sending batch for segmentation and translation (Attempt {})...  ").format(attempt + 1))

                completion = client.chat.completions.create(**model_kwargs)
                full_response = completion.choices[0].message.content
                logger.info(f"Raw model response for segmentation: {full_response}")

                if stream_callback:
                    stream_callback(full_response + "\n")

                if full_response:
                    # Strip each segment and flatten internal newlines to spaces.
                    segments = [seg.strip().replace('\n', ' ') for seg in full_response.strip().split("|||") if seg.strip()]
                    logger.info(_("✅ Segmentation and translation successful for batch on attempt {}.").format(attempt + 1))
                    return segments
                else:
                    logger.warning(_("Segmentation task returned an empty response on attempt {}.").format(attempt + 1))
                    if attempt < max_retries:
                        time.sleep(2)
                        continue
                    else:
                        raise TranslationError(_("Segmentation failed after multiple retries due to empty responses."))

            except CancelledError:
                # Cancellation is not an error; propagate untouched.
                raise
            except Exception as e:
                logger.error(_("❌ Critical error during segmentation of batch on attempt {}: {}").format(attempt + 1, e))
                if attempt < max_retries:
                    time.sleep(2)
                else:
                    self._handle_api_error(e, stream_callback, context=_("Segmentation and Translation"))
                    raise TranslationError(_("Segmentation failed after multiple retries.")) from e
        return []

    def translate(self, texts, source_lang, target_lang, model_config, video_type="常规", translation_style="常规", no_think=False, batch_start_callback=None, batch_done_callback=None, stream_callback=None, is_cancelled=None):
        """Translate *texts* using the cache plus parallel batched requests.

        Workflow: (1) serve hits from the local translation cache, (2) split
        remaining texts into batches bounded by line and character limits,
        (3) translate batches on a thread pool, throttled by a per-endpoint
        semaphore, writing results back to their original positions and into
        the cache.

        Args:
            texts: Lines to translate; output order matches input order.
            source_lang: Source language name ('auto' allowed).
            target_lang: Target language name.
            model_config: Dict with 'base_url', 'api_key', 'model_name',
                and optionally 'display_name'.
            video_type: Prompt style hint for the video context.
            translation_style: Prompt style hint for the desired tone.
            no_think: Prefix user prompts with '/no_think' when True.
            batch_start_callback: Called as (current_batch, total_batches).
            batch_done_callback: Called as (done_batches, total_batches, size).
            stream_callback: Receives streamed model output for display.
            is_cancelled: Callable; when True the task aborts and [] is returned.

        Returns:
            List of translated strings aligned with *texts*; failed or
            cancelled lines carry a marker prefix. [] when cancelled.

        Raises:
            TranslationError: on setup failure or when any batch failed.
        """
        is_cancelled = is_cancelled or (lambda: False)

        try:
            client = self._get_client_for_model(model_config)
            model_name = model_config.get("model_name")
            display_name = model_config.get("display_name", "Unknown Model")
        except (ValueError, KeyError) as e:
            logger.error(f"Failed to get OpenAI client or model config: {e}")
            raise TranslationError(_("Translation failed: {}").format(e))

        # 1. Check cache for existing translations
        all_translated_texts = [None] * len(texts)
        texts_to_process = []
        logger.info(
            _("Checking translation cache for {} texts...").format(len(texts)))
        for i, text in enumerate(texts):
            if is_cancelled():
                return []
            cached_result = self.cache.get(
                text, self.get_name(), model_name, source_lang, target_lang)
            if cached_result is not None:
                all_translated_texts[i] = cached_result
            else:
                texts_to_process.append({'text': text, 'original_index': i})

        cached_count = len(texts) - len(texts_to_process)
        logger.info(_("Cache check complete. Found {} cached translations, {} texts to translate.").format(
            cached_count, len(texts_to_process)))

        if not texts_to_process:
            logger.info(
                _("=== All translations found in cache, task completed. ==="))
            # Report a single, already-completed batch so progress UIs close out.
            if batch_start_callback:
                batch_start_callback(1, 1)
            if batch_done_callback:
                batch_done_callback(1, 1, cached_count)
            return all_translated_texts

        # 2. Prepare for batch translation of remaining texts
        base_url = model_config.get("base_url")
        service_config = app_config.get_service_config_by_url(base_url)
        # Default to 5 if not specified
        rpm_limit = service_config.get("rpm_limit", 5) if service_config else 5

        # Lazily create the shared per-endpoint semaphore under the class lock.
        with OpenAITranslator._semaphore_lock:
            if base_url not in OpenAITranslator._semaphores:
                OpenAITranslator._semaphores[base_url] = threading.Semaphore(
                    rpm_limit)
                logger.info(_("Request semaphore for {} initialized with a limit of {}.").format(
                    base_url, rpm_limit))

        max_lines_per_batch = app_config.get(
            'translation.max_lines_per_batch', 50)
        max_chars_per_batch = app_config.get(
            'translation.max_chars_per_batch', 4000)

        # More workers than the semaphore allows; excess workers queue on it.
        max_workers = rpm_limit * 2

        logger.info(
            _("=== Starting parallel translation task for remaining texts ==="))
        logger.info(
            _("  - Using model: {} ({})").format(display_name, model_name))
        logger.info(
            _("  - Service endpoint: {} (RPM Limit: {})").format(base_url, rpm_limit))

        texts_for_batching = [item['text'] for item in texts_to_process]
        batches = self._create_batches_by_line_and_char(
            texts_for_batching, max_lines_per_batch, max_chars_per_batch)

        # Map batch indices back to original text indices
        for batch in batches:
            batch['original_indices'] = [texts_to_process[i]['original_index']
                                         for i in batch['original_indices']]

        total_batches = len(batches)
        logger.info(
            _("  - Created {} batches for {} texts.").format(total_batches, len(texts_to_process)))

        errors = []

        # 3. Execute translation in parallel
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_batch = {
                executor.submit(self._translate_batch_parallel, client, model_name, batch, source_lang, target_lang, video_type, translation_style, no_think, stream_callback, is_cancelled, texts): batch
                for batch in batches
            }

            pending_futures = set(future_to_batch.keys())
            processed_batches = 0

            if batch_start_callback:
                batch_start_callback(0, total_batches)

            # Poll with a short timeout so cancellation is noticed promptly.
            while pending_futures:
                if is_cancelled():
                    for f in pending_futures:
                        f.cancel()
                    break

                done, pending_futures = wait(
                    pending_futures, timeout=0.5, return_when=FIRST_COMPLETED)
                if not done:
                    continue

                for future in done:
                    batch_info = future_to_batch[future]
                    if batch_start_callback:
                        batch_start_callback(
                            processed_batches + 1, total_batches)

                    try:
                        batch_translations = future.result()
                        for i, translated_text in enumerate(batch_translations):
                            original_index = batch_info['original_indices'][i]
                            all_translated_texts[original_index] = translated_text

                            # Set cache for the newly translated text
                            original_text = batch_info['texts'][i]
                            self.cache.set(original_text, translated_text, self.get_name(
                            ), model_name, source_lang, target_lang)

                    except CancelledError:
                        # Keep the original text, tagged so the UI can show it.
                        logger.info(_("Batch {} was successfully cancelled.").format(
                            batch_info['batch_index']))
                        for i, text in enumerate(batch_info['texts']):
                            all_translated_texts[batch_info['original_indices'][i]] = _(
                                "[Translation cancelled] {}").format(text)
                    except Exception as e:
                        error_message = _("Batch {} translation failed: {}").format(
                            batch_info['batch_index'], e)
                        logger.error(f"❌ {error_message}")
                        errors.append(error_message)
                        for i, text in enumerate(batch_info['texts']):
                            all_translated_texts[batch_info['original_indices'][i]] = _(
                                "[Translation failed] {}").format(text)

                    processed_batches += 1
                    if batch_done_callback:
                        batch_size = len(batch_info['texts'])
                        batch_done_callback(
                            processed_batches, total_batches, batch_size)

            # Best-effort cancel of anything still queued (e.g. after break).
            for f in pending_futures:
                f.cancel()

        if is_cancelled():
            return []

        if errors:
            raise TranslationError(
                _("Encountered {} errors during translation.").format(len(errors)), errors)

        logger.info(_("=== Translation task completed ==="))
        return all_translated_texts

    def _create_batches_by_line_and_char(self, texts, max_lines, max_chars):
        """Partition *texts* into batches bounded by line count and total characters.

        A new batch starts whenever adding the next text would exceed
        *max_lines* entries or *max_chars* total characters. A single text
        longer than *max_chars* still forms its own batch.

        Returns:
            List of dicts with keys 'batch_index', 'texts', and
            'original_indices' (positions in the input list).
        """
        batches = []
        pending_texts, pending_indices, pending_chars = [], [], 0

        def flush():
            # Close out the in-progress batch, if any, and reset accumulators.
            nonlocal pending_texts, pending_indices, pending_chars
            if pending_texts:
                batches.append({
                    "batch_index": len(batches),
                    "texts": pending_texts,
                    "original_indices": pending_indices,
                })
                pending_texts, pending_indices, pending_chars = [], [], 0

        for idx, text in enumerate(texts):
            overflows = (len(pending_texts) >= max_lines
                         or pending_chars + len(text) > max_chars)
            if pending_texts and overflows:
                flush()
            pending_texts.append(text)
            pending_indices.append(idx)
            pending_chars += len(text)

        flush()
        return batches

    def _translate_batch_parallel(self, client, model_name, batch_info, source_lang, target_lang, video_type, translation_style, no_think, stream_callback, is_cancelled, all_texts):
        """Translate one batch under the per-endpoint semaphore, with retries.

        Acquires the endpoint's semaphore, attempts the JSON batch translation
        up to max_retries + 1 times, and falls back to sentence-by-sentence
        translation when the model repeatedly returns the wrong number of
        lines. Exceptions from the final attempt are re-raised to the caller.

        Raises:
            CancelledError: if is_cancelled() turns true before/between attempts.
        """
        # client.base_url is an httpx.URL object, while translate() keys the
        # shared semaphore dict with the plain base_url string from the config.
        # Convert to str so both paths hit the same dictionary entry instead of
        # always falling into the late-initialization branch below.
        # NOTE(review): httpx may normalize the URL (e.g. trailing slash) — if
        # the config string differs, the late-init branch can still trigger;
        # confirm both sides agree on normalization.
        base_url = str(client.base_url)

        # Defensive late initialization in case translate() did not set up the
        # semaphore for this endpoint (kept as a safety net).
        with OpenAITranslator._semaphore_lock:
            if base_url not in OpenAITranslator._semaphores:
                service_config = app_config.get_service_config_by_url(base_url)
                rpm_limit = service_config.get(
                    "rpm_limit", 5) if service_config else 5
                OpenAITranslator._semaphores[base_url] = threading.Semaphore(
                    rpm_limit)
                logger.warning(
                    f"Semaphore for {base_url} was initialized late (in a worker thread). This might indicate a race condition. Limit set to {rpm_limit}.")

        request_semaphore = OpenAITranslator._semaphores[base_url]

        with request_semaphore:
            if is_cancelled():
                raise CancelledError()

            max_retries = 2
            for attempt in range(max_retries + 1):
                if is_cancelled():
                    raise CancelledError()
                try:
                    batch_translations = self._translate_batch(
                        client, model_name, batch_info, source_lang, target_lang, video_type, translation_style, no_think, stream_callback, is_cancelled, all_texts)

                    if len(batch_translations) == len(batch_info['texts']):
                        logger.info(_("✅ Batch {} batch translation successful on attempt {}").format(
                            batch_info['batch_index'], attempt + 1))
                        return batch_translations

                    # Line-count mismatch: the model merged or dropped lines.
                    logger.warning(_("Batch {} line count mismatch on attempt {}. Expected {}, got {}.").format(
                        batch_info['batch_index'], attempt + 1, len(batch_info['texts']), len(batch_translations)))
                    if attempt < max_retries:
                        logger.info(
                            _("Retrying batch {}...").format(batch_info['batch_index']))
                        if stream_callback:
                            stream_callback(_("\n[Warning] Batch {} line count mismatch, retrying ({}/{})\n").format(
                                batch_info['batch_index'], attempt + 1, max_retries))
                        time.sleep(2)

                except Exception as e:
                    logger.error(_("❌ Critical error during translation of batch {} on attempt {}: {}").format(
                        batch_info['batch_index'], attempt + 1, e))
                    if attempt < max_retries:
                        logger.info(
                            _("Retrying batch {}...").format(batch_info['batch_index']))
                        if stream_callback:
                            stream_callback(_("\n[Error] Batch {} failed, retrying ({}/{})\n").format(
                                batch_info['batch_index'], attempt + 1, max_retries))
                        time.sleep(2)
                    else:
                        logger.error(
                            _("❌ Batch {} failed after all retries.").format(batch_info['batch_index']))
                        raise e

            # Reached only via persistent line-count mismatch (exceptions on the
            # last attempt re-raise above): degrade to per-sentence translation.
            logger.warning(
                _("Batch {} failed after all retries, falling back to sentence-by-sentence translation.").format(batch_info['batch_index']))
            if stream_callback:
                stream_callback(
                    _("\n[Warning] Batch {} failed after all retries, automatically falling back to sentence-by-sentence mode...\n").format(batch_info['batch_index']))

            return self._translate_sentence_by_sentence(client, model_name, batch_info, source_lang, target_lang, video_type, translation_style, no_think, stream_callback, is_cancelled, all_texts)

    def _translate_batch(self, client, model_name, batch_info, source_lang, target_lang, video_type, translation_style, no_think, stream_callback, is_cancelled, all_texts):
        """Translate one batch via a streamed JSON-mode chat completion.

        Builds a JSON payload where each 1-based key maps to the text to
        translate plus its neighbouring lines for context, streams the model
        response, then parses it back into an ordered list of strings.

        Returns:
            The translated strings in input order, or [] when cancelled, when
            the response is incomplete/untranslated, or when JSON parsing
            fails (the caller then retries or falls back).

        Raises:
            Exception: API/transport errors are reported via _handle_api_error
            and re-raised so the caller's retry logic can run.
        """
        if is_cancelled():
            return []

        batch_index = batch_info['batch_index']
        batch_texts = batch_info['texts']
        log_prefix = _("[Batch {}]").format(batch_index)

        # Give the model the neighbouring subtitle lines as context so
        # sentences broken across cue boundaries translate fluently.
        input_dict = {}
        for i, text in enumerate(batch_texts):
            original_index = batch_info['original_indices'][i]
            prev_text = all_texts[original_index - 1] if original_index > 0 else None
            next_text = all_texts[original_index + 1] if original_index + 1 < len(all_texts) else None

            input_dict[str(i + 1)] = {
                "prev_context": prev_text or "",
                "text_to_translate": text,
                "next_context": next_text or ""
            }

        input_json_str = json.dumps(input_dict, ensure_ascii=False, indent=2)
        user_prompt = f"/no_think\n{input_json_str}" if no_think else input_json_str
        source_lang_text = _("Auto-detect") if source_lang.lower() == 'auto' else source_lang

        system_prompt = (
            "You are a professional subtitle translator with expertise in adapting content to specific video contexts. You will be given a JSON object where each key maps to an object with 'text_to_translate' and its surrounding context. "
            "Your task is to translate the 'text_to_translate' from {source_lang_text} to {target_lang} while adapting the tone and style to match a '{video_type}' video with a '{translation_style}' style. "
            "Use 'prev_context' and 'next_context' to ensure your translation is fluent and accurately connected with the surrounding dialogue, intelligently handling broken or incomplete sentences. "
            "Adapt the translation style actively: employ appropriate language, humor, or formality level for this video type while retaining the original meaning. "
            "Your output MUST be a valid JSON object with keys identical to the input. "
            "The value for each key must be ONLY the translated string of the corresponding 'text_to_translate'. "
            "Do not add any explanations or extra text outside of the JSON object."
        ).format(source_lang_text=source_lang_text, target_lang=target_lang, video_type=video_type, translation_style=translation_style)

        model_kwargs = {
            "model": model_name or "gpt-3.5-turbo",
            "messages": [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
            "temperature": 0.2,  # low temperature for stable, faithful output
            "stream": True,
            "response_format": {"type": "json_object"},
            "timeout": 60,
        }

        # Debug: Print the actual request being sent
        logger.debug("=== DEBUG BATCH REQUEST ===")
        logger.debug(f"Model: {model_name}")
        logger.debug(f"Video Type: {video_type}")
        logger.debug(f"Translation Style: {translation_style}")
        logger.debug(f"System Prompt:\n{system_prompt}")
        logger.debug(f"User Prompt:\n{user_prompt}")
        logger.debug("=== END DEBUG BATCH REQUEST ===")

        try:
            stream = client.chat.completions.create(**model_kwargs)
            full_response = ""
            if stream_callback:
                stream_callback(_("{} Receiving model response: ").format(log_prefix))

            for chunk in stream:
                if is_cancelled():
                    stream.close()
                    return []
                content = chunk.choices[0].delta.content or ""
                full_response += content
                if stream_callback:
                    stream_callback(content)

            if stream_callback:
                stream_callback("\n")

            try:
                match = re.search(r"```json\s*(\{.*?\})\s*```", full_response, re.DOTALL)
                if match:
                    json_str = match.group(1)
                else:
                    # If no markdown block is found, assume the whole response is the JSON string.
                    # This is stricter and avoids parsing partial/incorrect JSON from a conversational response.
                    json_str = full_response

                response_dict = json.loads(json_str)

                translated_texts = []
                is_complete_and_translated = True
                for i in range(len(batch_texts)):
                    key = str(i + 1)
                    original_text = batch_texts[i]
                    translated_text = response_dict.get(key)

                    # A missing key or an unchanged (untranslated) line marks the
                    # whole batch as failed so the caller can retry or fall back.
                    if not translated_text or translated_text.strip() == original_text.strip():
                        is_complete_and_translated = False
                        if not translated_text:
                            logger.warning(_("{} Key '{}' missing or empty in translation response.").format(log_prefix, key))
                        else:
                            logger.warning(_("{} Translation for key '{}' is identical to the original text. Original: '{}'").format(
                                log_prefix, key, original_text))
                        break

                    translated_texts.append(translated_text)

                if is_complete_and_translated:
                    return translated_texts
                logger.warning(_("{} Translation response is incomplete or contains untranslated text. Triggering retry/fallback.").format(log_prefix))
                return []
            except (json.JSONDecodeError, TypeError) as e:
                # TypeError also covers non-string values returned by the model
                # (e.g. nested objects), which would break .strip() above.
                logger.warning(_("{} JSON processing failed: {}").format(log_prefix, e))
                return []
        except Exception as e:
            # The original two except clauses (specific API errors + generic)
            # had identical bodies, so they are merged; all errors are reported
            # and re-raised for the caller's retry logic.
            self._handle_api_error(e, stream_callback, context=_("Batch {} translation").format(batch_index))
            raise

    def _translate_sentence_by_sentence(self, client, model_name, batch_info, source_lang, target_lang, video_type, translation_style, no_think, stream_callback, is_cancelled, all_texts):
        """Fallback: translate each line of a batch with its own streamed request.

        Used when JSON batch translation repeatedly returned a mismatched line
        count. Each line is sent with its neighbouring lines as context.

        Returns:
            List of translated strings in batch order.
            NOTE(review): when cancelled mid-stream, the partial line is
            skipped (`continue` below), so the list can be shorter than
            batch_info['texts'] — callers only hit this during cancellation.

        Raises:
            Exception: API/transport errors are reported via _handle_api_error
            and re-raised (aborting the remaining lines of the batch).
        """
        if is_cancelled():
            return []

        batch_index = batch_info['batch_index']
        batch_texts = batch_info['texts']
        log_prefix = _("[Batch {}]").format(batch_index)
        final_translations = []
        for idx, text in enumerate(batch_texts):
            if is_cancelled():
                break
            if stream_callback:
                stream_callback( _("  - {} Translating line {}/{}: ").format(log_prefix, idx+1, len(batch_texts)))

            # Surrounding lines provide context for fluent, connected output.
            original_index = batch_info['original_indices'][idx]
            prev_text = all_texts[original_index - 1] if original_index > 0 else None
            next_text = all_texts[original_index + 1] if original_index + 1 < len(all_texts) else None

            source_lang_text = _(
                "Auto-detect") if source_lang.lower() == 'auto' else source_lang
            system_prompt = (
                "You are a professional translator for film and television subtitles with expertise in adapting content to specific video contexts. Your task is to translate the following text from [Source Language] to [Target Language].\n\n"
                "Source Language: {source_lang_text}\n"
                "Target Language: {target_lang}\n"
                "Video Type: {video_type}\n"
                "Translation Style: {translation_style}\n\n"
                "To ensure a fluent and natural translation, please use the provided context. Intelligently handle broken or incomplete sentences.\n"
                "Previous sentence (for context): {prev_context}\n"
                "Next sentence (for context): {next_context}\n\n"
                "Please follow these rules:\n"
                "1. Translate ONLY the 'Text to Translate' below.\n"
                "2. Your translation must flow naturally with the context provided.\n"
                "3. Actively adapt the translation style to '{translation_style}' for a '{video_type}' type video.\n"
                "4. Employ appropriate language, humor, or formality level for this video type.\n"
                "5. Retain the original meaning while making it engaging for the target audience.\n"
                "6. Your output must ONLY be the translated text, with no extra explanations or commentary."
            ).format(
                source_lang_text=source_lang_text,
                target_lang=target_lang,
                video_type=video_type,
                translation_style=translation_style,
                prev_context=prev_text or _("None"),
                next_context=next_text or _("None")
            )
            user_prompt = f"Text to Translate:\n---\n{text}"

            try:
                # Debug: Print the actual request being sent
                logger.debug(f"=== DEBUG SENTENCE REQUEST ===")
                logger.debug(f"Model: {model_name}")
                logger.debug(f"Video Type: {video_type}")
                logger.debug(f"Translation Style: {translation_style}")
                logger.debug(f"System Prompt:\n{system_prompt}")
                logger.debug(f"User Prompt:\n{user_prompt}")
                logger.debug(f"=== END DEBUG SENTENCE REQUEST ===")

                stream = client.chat.completions.create(
                    model=model_name or "gpt-3.5-turbo",
                    messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
                    temperature=0.3,
                    stream=True,
                    timeout=60
                )

                full_response = ""
                for chunk in stream:
                    if is_cancelled():
                        stream.close()
                        break
                    content = chunk.choices[0].delta.content or ""
                    full_response += content
                    if stream_callback:
                        stream_callback(content)

                # Discard the partial result of a line cancelled mid-stream.
                if is_cancelled():
                    continue

                final_translations.append(full_response.strip())
                if stream_callback:
                    stream_callback("\n")

            except (RateLimitError, APIStatusError, AuthenticationError, APIConnectionError) as e:
                self._handle_api_error(e, stream_callback, context=_( "Batch {} sentence-by-sentence translation (Line {})").format(batch_index, idx+1))
                raise e
            except Exception as e:
                self._handle_api_error(e, stream_callback, context=_( "Batch {} sentence-by-sentence translation (Line {})").format(batch_index, idx+1))
                raise e

        return final_translations

    def _handle_api_error(self, e, stream_callback, context=""):
        """Log an API error with a user-friendly message and notify the UI.

        Maps known OpenAI exception types to localized messages, logs them
        with the traceback, and forwards a short error line to
        *stream_callback* when one is provided.
        """
        error_type = type(e).__name__

        # Order of the checks is significant: the more specific exception
        # types are matched before the generic APIStatusError message.
        if isinstance(e, RateLimitError):
            user_message = _(
                "Request rate limit exceeded (429). Please try again later or check your API quota.")
        elif isinstance(e, AuthenticationError):
            user_message = _(
                "Invalid API key or insufficient permissions. Please check your configuration.")
        elif isinstance(e, APIStatusError):
            user_message = _("API returned an error status {}: {}").format(
                e.status_code, getattr(e, 'message', str(e)))
        elif isinstance(e, APIConnectionError):
            user_message = _(
                "Unable to connect to the API service. Please check your network and API address.")
        else:
            user_message = _("An unknown error occurred: {}").format(str(e))

        logger.error(_("❌ {context} encountered a {error_type} error: {user_message}").format(
            context=context, error_type=error_type, user_message=user_message), exc_info=True)

        if stream_callback:
            if context:
                context_text = _("{} failed").format(context)
            else:
                context_text = _("Operation failed")
            stream_callback(_("\n[Error] {}: {}\n").format(
                context_text, user_message))