from src.ui.i18n import _
from PySide6.QtCore import QThread, Signal
from openai import OpenAI, APIConnectionError, RateLimitError, APIStatusError, AuthenticationError


class ConnectionTestWorker(QThread):
    """Worker thread that checks whether an OpenAI-compatible endpoint responds.

    The outcome is delivered through the ``finished`` signal as
    ``(success: bool, message: str)``; the message is already translated.

    NOTE(review): this custom ``finished`` Signal shadows QThread's built-in
    parameterless ``finished`` signal — confirm no caller relies on the latter.
    """

    finished = Signal(bool, str)

    def __init__(self, base_url, api_key):
        super().__init__()
        # Endpoint configuration captured here, consumed on the worker thread.
        self.base_url = base_url
        self.api_key = api_key

    def run(self):
        """List the provider's models and emit success or a failure message."""
        try:
            client = OpenAI(api_key=self.api_key, base_url=self.base_url, max_retries=1)
            count = len(client.models.list().data)
            self.finished.emit(
                True,
                _("Connection successful!\n\nThe provider returned {} models.").format(count),
            )
        except Exception as exc:
            self.finished.emit(False, self._describe_failure(exc))

    @staticmethod
    def _describe_failure(exc):
        """Map an SDK exception to a user-facing, translated message.

        Checked most-specific first, mirroring the SDK hierarchy
        (AuthenticationError and RateLimitError derive from APIStatusError).
        """
        if isinstance(exc, AuthenticationError):
            return _("Invalid API key or insufficient permissions. Please check your configuration.")
        if isinstance(exc, APIConnectionError):
            return _("Unable to connect to the API service. Please check your network and Base URL.")
        if isinstance(exc, RateLimitError):
            return _("Request rate limit exceeded (429). Please try again later or check your API quota.")
        if isinstance(exc, APIStatusError):
            return _("API returned an error status {}: {}").format(exc.status_code, getattr(exc, 'message', str(exc)))
        return _("An unknown error occurred: {}").format(str(exc))

def test_openai_connection(base_url, api_key):
    """Build (but do not start) a worker that tests the OpenAI connection.

    The caller owns the returned worker: connect to its ``finished`` signal
    and call ``start()`` to run the check off the UI thread.
    """
    worker = ConnectionTestWorker(base_url, api_key)
    return worker

class ModelFetchWorker(QThread):
    """Worker thread that retrieves the model list from an OpenAI-compatible endpoint.

    Emits ``finished(success: bool, payload)`` where ``payload`` is the list of
    model objects on success, or a translated error string on failure.
    """

    finished = Signal(bool, object)

    def __init__(self, base_url, api_key):
        super().__init__()
        # Endpoint configuration captured here, consumed on the worker thread.
        self.base_url = base_url
        self.api_key = api_key

    def run(self):
        """Fetch the models and emit them, or emit a failure message."""
        try:
            client = OpenAI(api_key=self.api_key, base_url=self.base_url, max_retries=1)
            self.finished.emit(True, client.models.list().data)
        except Exception as exc:
            self.finished.emit(False, self._describe_failure(exc))

    @staticmethod
    def _describe_failure(exc):
        """Map an SDK exception to a user-facing, translated message.

        Checked most-specific first, mirroring the SDK hierarchy
        (AuthenticationError and RateLimitError derive from APIStatusError).
        """
        if isinstance(exc, AuthenticationError):
            return _("Invalid API key or insufficient permissions. Please check your configuration.")
        if isinstance(exc, APIConnectionError):
            return _("Unable to connect to the API service. Please check your network and Base URL.")
        if isinstance(exc, RateLimitError):
            return _("Request rate limit exceeded (429). Please try again later or check your API quota.")
        if isinstance(exc, APIStatusError):
            return _("API returned an error status {}: {}").format(exc.status_code, getattr(exc, 'message', str(exc)))
        return _("An unknown error occurred: {}").format(str(exc))

def fetch_models(base_url, api_key):
    """Build (but do not start) a worker that lists models from the endpoint.

    The caller owns the returned worker: connect to its ``finished`` signal
    and call ``start()`` to run the fetch off the UI thread.
    """
    worker = ModelFetchWorker(base_url, api_key)
    return worker


class ModelTestWorker(QThread):
    """Worker thread that exercises one chat model with a tiny completion request.

    Emits ``finished(success: bool, message: str)`` with a translated message.
    """

    finished = Signal(bool, str)

    def __init__(self, base_url, api_key, model_name):
        super().__init__()
        # Endpoint and target model captured here, consumed on the worker thread.
        self.base_url = base_url
        self.api_key = api_key
        self.model_name = model_name

    def run(self):
        """Send a minimal chat completion and emit the outcome."""
        try:
            client = OpenAI(api_key=self.api_key, base_url=self.base_url, max_retries=1)
            # Tiny prompt + 5-token cap keeps the probe cheap.
            client.chat.completions.create(
                model=self.model_name,
                messages=[{"role": "user", "content": "Hello"}],
                max_tokens=5,
            )
            self.finished.emit(True, _("Model test successful!"))
        except Exception as exc:
            self.finished.emit(False, self._describe_failure(exc))

    @staticmethod
    def _describe_failure(exc):
        """Map an SDK exception to a user-facing, translated message.

        Checked most-specific first, mirroring the SDK hierarchy
        (AuthenticationError and RateLimitError derive from APIStatusError).
        A 404 status gets a dedicated "model not found" explanation.
        """
        if isinstance(exc, AuthenticationError):
            return _("Invalid API key or insufficient permissions. Please check your configuration.")
        if isinstance(exc, APIConnectionError):
            return _("Unable to connect to the API service. Please check your network and Base URL.")
        if isinstance(exc, RateLimitError):
            return _("Request rate limit exceeded (429). Please try again later or check your API quota.")
        if isinstance(exc, APIStatusError):
            if exc.status_code == 404:
                return _("Model not found. Please ensure the Model ID is correct and supported by the provider.")
            return _("API returned an error status {}: {}").format(exc.status_code, getattr(exc, 'message', str(exc)))
        return _("An unknown error occurred: {}").format(str(exc))

def test_model_connection(base_url, api_key, model_name):
    """Build (but do not start) a worker that tests a specific LLM model.

    The caller owns the returned worker: connect to its ``finished`` signal
    and call ``start()`` to run the probe off the UI thread.
    """
    worker = ModelTestWorker(base_url, api_key, model_name)
    return worker