from typing import Any
import json
from fastapi import APIRouter, Depends, HTTPException, Request, Header
from fastapi.responses import JSONResponse
from sqlalchemy.orm import Session
import google.generativeai as genai
from google.generativeai.types import GenerateContentResponse

from ... import crud
from ...database import get_db
from ...config import settings

# Router for the AI-proxy endpoints; mounted by the parent FastAPI application.
router = APIRouter()

# Configure the Gemini API key for the genai SDK
# (runs once at import time; skipped when no key is configured in settings).
if settings.gemini_api_key:
    genai.configure(api_key=settings.gemini_api_key)

async def get_api_key(authorization: str = Header(None), x_goog_api_key: str = Header(None)):
    """
    FastAPI dependency: extract the client's API key from request headers.

    Precedence: the 'x-goog-api-key' header wins; otherwise the key is taken
    from an 'Authorization: Bearer <key>' header.

    Raises:
        HTTPException 401: neither header carries a non-empty key.
    """
    # SECURITY: never log the raw key material -- these are client secrets
    # and would leak into stdout/log aggregation.
    if x_goog_api_key:
        return x_goog_api_key

    if authorization and authorization.startswith("Bearer "):
        # Take everything after the "Bearer " prefix; split(" ", 1) keeps the
        # remainder intact instead of truncating at the next space.
        api_key = authorization.split(" ", 1)[1].strip()
        if api_key:
            return api_key
        # A bare "Bearer " header with no key falls through to the 401 below.

    raise HTTPException(status_code=401, detail="Invalid or missing API Key. Provide it in 'Authorization: Bearer <key>' or 'x-goog-api-key: <key>' header.")

def _convert_generate_content_response_to_dict(response: "GenerateContentResponse") -> dict:
    """
    Convert a genai SDK GenerateContentResponse into a plain dict that mimics
    the Gemini REST API JSON response (camelCase keys).

    Defensive against partially populated responses: a missing ``content`` or
    ``finish_reason`` on a candidate, parts without a text payload (e.g.
    function-call parts), and absent safety ratings all degrade to sensible
    defaults instead of raising AttributeError.
    """
    result: dict = {}

    candidates = getattr(response, 'candidates', None)
    if candidates:
        result['candidates'] = []
        for cand in candidates:
            # ``cand.content`` itself may be missing/None on some responses.
            content = getattr(cand, 'content', None)
            parts = (getattr(content, 'parts', None) or []) if content is not None else []
            result['candidates'].append({
                'index': getattr(cand, 'index', 0),
                'content': {
                    'role': getattr(content, 'role', None) or 'model',
                    # Only text parts are representable here; skip parts that
                    # carry no text (e.g. function_call parts) rather than crash.
                    'parts': [{'text': p.text} for p in parts if getattr(p, 'text', None) is not None],
                },
                # Nested getattr: ``finish_reason`` may be absent entirely.
                'finishReason': getattr(getattr(cand, 'finish_reason', None), 'name', 'FINISH_REASON_UNSPECIFIED'),
                'safetyRatings': [
                    {'category': r.category.name, 'probability': r.probability.name}
                    for r in (getattr(cand, 'safety_ratings', None) or [])
                ],
                'tokenCount': getattr(cand, 'token_count', 0),
            })

    prompt_feedback = getattr(response, 'prompt_feedback', None)
    if prompt_feedback:
        result['promptFeedback'] = {
            'safetyRatings': [
                {'category': r.category.name, 'probability': r.probability.name}
                for r in (getattr(prompt_feedback, 'safety_ratings', None) or [])
            ]
        }

    usage = getattr(response, 'usage_metadata', None)
    if usage:
        result['usageMetadata'] = {
            'promptTokenCount': getattr(usage, 'prompt_token_count', 0),
            'candidatesTokenCount': getattr(usage, 'candidates_token_count', 0),
            'totalTokenCount': getattr(usage, 'total_token_count', 0),
        }
    return result

def _body_field(body: dict, *keys: str):
    """Return the value of the first present key, or None.

    Lets the proxy accept both the Gemini REST wire format (camelCase keys,
    e.g. 'generationConfig') and the SDK-style snake_case spelling.
    """
    for key in keys:
        if key in body:
            return body[key]
    return None


@router.post("/{path:path}")
async def proxy_post_requests(path: str, request: Request, db: Session = Depends(get_db), api_key: str = Depends(get_api_key)):
    """
    Proxy a Gemini POST request (generateContent / embedContent / countTokens)
    through the genai SDK and bill the token usage to the authenticated user.

    Raises:
        HTTPException 401: unknown API key.
        HTTPException 402: user has no balance, or the deduction fails.
        HTTPException 404: unsupported API method in the path.
        HTTPException 502: upstream Gemini error.
    """
    # 1. Authenticate user
    user = crud.get_user_by_api_key(db, api_key=api_key)
    if not user:
        raise HTTPException(status_code=401, detail="Invalid API Key")

    # 2. Check initial balance before spending anything upstream
    if user.token_balance <= 0:
        raise HTTPException(status_code=402, detail="Insufficient balance")

    request_body = await request.json()

    # SECURITY: deliberately not logging request headers -- they carry the
    # client's API key.
    print("\n--- [AIProxy Debug] --- Received Request ---")
    print(f"Path: {path}")
    print(f"Body: {json.dumps(request_body, indent=2)}")
    print("----------------------------------------------------")

    response_data: Any
    tokens_used = 0

    try:
        # 3. Route to the appropriate genai SDK method based on the path
        if ":generateContent" in path:
            model_name = path.split('/')[-1].split(':')[0]
            model = genai.GenerativeModel(model_name)

            # Forward request to Gemini API using the SDK; accept both
            # camelCase (REST) and snake_case (SDK) request keys.
            response = model.generate_content(
                contents=request_body.get('contents'),
                generation_config=_body_field(request_body, 'generationConfig', 'generation_config'),
                safety_settings=_body_field(request_body, 'safetySettings', 'safety_settings'),
                tools=request_body.get('tools'),
            )

            # Convert SDK response to dict and read the billed token count.
            response_data = _convert_generate_content_response_to_dict(response)
            if 'usageMetadata' in response_data:
                tokens_used = response_data['usageMetadata'].get('totalTokenCount', 0)

        elif ":embedContent" in path:
            model_name = path.split('/')[-1].split(':')[0]
            response = genai.embed_content(
                model=f"models/{model_name}",
                content=request_body.get('content'),
                task_type=_body_field(request_body, 'taskType', 'task_type'),
            )
            response_data = response
            # The embed endpoint reports no usage metadata, so approximate the
            # input size for billing (~4 characters per token; rough estimate).
            text_to_count = ""
            content = request_body.get('content')
            if isinstance(content, dict) and 'parts' in content:
                text_to_count = "".join(p['text'] for p in content['parts'] if 'text' in p)
            elif isinstance(content, str):
                text_to_count = content
            if text_to_count:
                tokens_used = len(text_to_count) // 4

        elif ":countTokens" in path:
            model_name = path.split('/')[-1].split(':')[0]
            model = genai.GenerativeModel(model_name)
            response = model.count_tokens(request_body.get('contents'))
            response_data = {'totalTokens': response.total_tokens}
            tokens_used = 0  # No cost for counting tokens

        else:
            raise HTTPException(status_code=404, detail=f"API method for path '{path}' is not supported by this proxy.")

        print("--- [AIProxy Debug] --- Received Response from Gemini ---")
        print(f"Body: {json.dumps(response_data, indent=2)}")
        print("------------------------------------------------------\n")

    except HTTPException:
        # Re-raise our own HTTP errors (e.g. the 404 above) unchanged instead
        # of masking them as a 502 upstream failure.
        raise
    except Exception as e:
        print(f"\n--- [AIProxy Debug] --- Received ERROR from Gemini ---")
        print(f"Error: {e}")
        print("------------------------------------------------------\n")
        raise HTTPException(status_code=502, detail=f"Error contacting Gemini API: {str(e)}")

    # 4. Deduct tokens from user's balance
    if tokens_used > 0:
        updated_user = crud.deduct_user_tokens(db, user_id=user.id, amount=tokens_used)
        if not updated_user:
            raise HTTPException(status_code=402, detail="Failed to deduct tokens, balance might be insufficient.")

    # 5. Return the response from Gemini API
    if isinstance(response_data, dict):
        return JSONResponse(content=response_data)

    return response_data
