import json
from app.llm_client import client
from app import config

def parse_recipe_text(text: str) -> dict:
    """
    Call the configured LLM to parse unstructured recipe text into a structured dict.

    Args:
        text: Free-form user description of a dish they cooked.

    Returns:
        On success, the parsed JSON object (the prompt requests the keys
        "dish_name", "ingredients", "method", "comment", "self_critique",
        but the model is not guaranteed to honor that schema).
        On any failure (API error or unparseable response), a dict with
        "error" and "details" keys — this function never raises.
    """
    # NOTE(review): `text` is interpolated into the prompt unescaped, so crafted
    # input can inject instructions (prompt injection). Fine for trusted input;
    # revisit if this endpoint is ever exposed to untrusted users.
    prompt = f"""
    You are a master chef's assistant. Your task is to parse the user's description of a dish they cooked.
    Extract the following information and return it as a valid JSON object:
    1. "dish_name": The name of the dish.
    2. "ingredients": A list of ingredients used.
    3. "method": A list of steps in the cooking process.
    4. "comment": The user's opinion or their family's feedback on the dish.
    5. "self_critique": The user's self-reflection or notes for improvement for next time.

    User's text: "{text}"

    JSON output:
    """

    try:
        response = client.chat.completions.create(
            model=config.OPENAI_MODEL,
            messages=[
                {"role": "system", "content": "You are a helpful assistant designed to output JSON."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.5,
        )

        response_text = response.choices[0].message.content

        # The model sometimes wraps the JSON in a markdown fence
        # (```json ... ``` or a bare ``` ... ```). A fixed-offset slice like
        # response_text[7:-4] corrupts the payload whenever the fence lacks a
        # trailing newline, has extra whitespace, or omits the "json" tag —
        # strip the fences structurally instead.
        cleaned = response_text.strip()
        if cleaned.startswith("```"):
            cleaned = cleaned[3:]
            # Drop an optional language tag right after the opening fence.
            if cleaned.startswith("json"):
                cleaned = cleaned[4:]
            if cleaned.rstrip().endswith("```"):
                cleaned = cleaned.rstrip()[:-3]

        return json.loads(cleaned)

    except Exception as e:
        # Broad catch is deliberate: this is the API boundary, and callers
        # expect a structured error dict rather than a propagated exception.
        print(f"Error calling LLM or parsing JSON: {e}")
        return {
            "error": "Failed to parse recipe.",
            "details": str(e)
        }

