#!/usr/bin/env python3
"""
Test script for Gemini API integration
"""
import asyncio
from datetime import datetime


# Mock the dependencies for testing
class MockClient:
    def __init__(self, api_key):
        self.api_key = api_key
    
    # Nested class stands in for the `client.models` namespace on the real client
    class models:
        @staticmethod
        def generate_content_stream(model, contents, config):
            # Mock streaming response
            class MockChunk:
                text = "Hello! I'm Tim Luka Horstmann, a Computer Scientist currently pursuing my MSc in Data and AI at Institut Polytechnique de Paris."
            
            yield MockChunk()


class MockTypes:
    class Content:
        def __init__(self, role, parts):
            self.role = role
            self.parts = parts
    
    class Part:
        def __init__(self, text):
            self.text = text
        
        @classmethod
        def from_text(cls, text):
            return cls(text)
    
    class GenerateContentConfig:
        def __init__(self, temperature, top_p, max_output_tokens):
            self.temperature = temperature
            self.top_p = top_p
            self.max_output_tokens = max_output_tokens
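
# For reference: the mocks above stand in for the google-genai SDK. A minimal
# sketch of the real (non-mock) setup, assuming the `google-genai` package and
# a GEMINI_API_KEY environment variable (assumptions, not requirements of this
# test), would look roughly like:
#
#   from google import genai
#   from google.genai import types
#
#   client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])
#   stream = client.models.generate_content_stream(
#       model="gemini-2.5-flash-preview-05-20",
#       contents=messages,
#       config=types.GenerateContentConfig(
#           temperature=0.3, top_p=0.7, max_output_tokens=512,
#       ),
#   )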


# Test function similar to our Gemini implementation
async def test_gemini_integration():
    """Test the Gemini integration logic"""
    
    # Mock environment variables
    USE_GEMINI = True
    gemini_api_key = "test_api_key"
    gemini_model = "gemini-2.5-flash-preview-05-20"
    
    # Mock full CV text
    full_cv_text = "Tim Luka Horstmann is a Computer Scientist pursuing MSc in Data and AI at Institut Polytechnique de Paris."
    
    # Initialize the mock client (stands in for genai.Client) and mock types
    # (stands in for the google.genai `types` module)
    gemini_client = MockClient(api_key=gemini_api_key)
    types = MockTypes()
    
    # Test query and history
    query = "What is your education?"
    history = []
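
    # A populated history would use role/content dicts; the loop below maps any
    # non-"user" role onto Gemini's "model" role. Illustrative example:
    #   history = [
    #       {"role": "user", "content": "Hi, who are you?"},
    #       {"role": "assistant", "content": "I'm Tim Luka Horstmann."},
    #   ]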
    
    print(f"Testing Gemini integration...")
    print(f"USE_GEMINI: {USE_GEMINI}")
    print(f"Query: {query}")
    
    # Simulate the Gemini function logic
    current_date = datetime.now().strftime("%Y-%m-%d")
    
    system_prompt = (
        "You are Tim Luka Horstmann, a Computer Scientist. A user is asking you a question. Respond as yourself, using the first person, in a friendly and concise manner. "
        "For questions about your CV, base your answer *exclusively* on the provided CV information below and do not add any details not explicitly stated. "
        "For casual questions not covered by the CV, respond naturally but limit answers to general truths about yourself (e.g., your current location is Paris, France, or your field is AI) "
        "and say 'I don't have specific details to share about that' if pressed for specifics beyond the CV or FAQs. Do not invent facts, experiences, or opinions not supported by the CV or FAQs. "
        f"Today's date is {current_date}. "
        f"CV: {full_cv_text}"
    )
    # Build messages for Gemini (no system role - embed instructions in first user message)
    messages = []
    
    # Add conversation history
    for msg in history:
        role = "user" if msg["role"] == "user" else "model"
        messages.append(types.Content(role=role, parts=[types.Part.from_text(text=msg["content"])]))
    
    # Add current query with system prompt embedded
    if not history:  # If no history, include system prompt with the first message
        combined_query = f"{system_prompt}\n\nUser question: {query}"
    else:
        combined_query = query
    
    messages.append(types.Content(role="user", parts=[types.Part.from_text(text=combined_query)]))
    print(f"System prompt length: {len(system_prompt)}")
    print(f"Number of messages: {len(messages)}")
    
    # Mock the streaming response
    response = gemini_client.models.generate_content_stream(
        model=gemini_model,
        contents=messages,
        config=types.GenerateContentConfig(
            temperature=0.3,
            top_p=0.7,
            max_output_tokens=512,
        )
    )
    
    print("Streaming response:")
    for chunk in response:
        if chunk.text:
            print(f"Chunk: {chunk.text}")
    
    print("✅ Gemini integration test completed successfully!")
    
    return True


if __name__ == "__main__":
    asyncio.run(test_gemini_integration())
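
# When run directly, the script echoes the test configuration, streams the
# single mocked chunk, and prints the success line at the end.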
