|
|
|
""" |
|
Test script for Gemini API integration |
|
""" |
|
|
|
import os |
|
import asyncio |
|
from datetime import datetime |
|
|
|
|
|
class MockClient:
    """Stand-in for the Gemini SDK client: stores the API key and fakes streaming."""

    def __init__(self, api_key):
        # Mirror the real client's constructor signature.
        self.api_key = api_key

    class models:
        """Namespace class mimicking the ``client.models`` attribute of the real SDK."""

        @staticmethod
        def generate_content_stream(model, contents, config):
            """Yield exactly one canned chunk, imitating a streaming response."""

            class _Chunk:
                # Fixed text matching what the real model would stream back.
                text = (
                    "Hello! I'm Tim Luka Horstmann, a Computer Scientist "
                    "currently pursuing my MSc in Data and AI at Institut "
                    "Polytechnique de Paris."
                )

            yield _Chunk()
|
|
|
class MockTypes:
    """Stand-in for the ``types`` module exposed by the Gemini SDK."""

    class Part:
        """A single piece of text inside a conversation turn."""

        def __init__(self, text):
            self.text = text

        @classmethod
        def from_text(cls, text):
            # The real SDK constructs Parts through this factory method.
            return cls(text)

    class Content:
        """One conversation turn: a role string plus a list of Part objects."""

        def __init__(self, role, parts):
            self.role, self.parts = role, parts

    class GenerateContentConfig:
        """Bag of generation parameters mirroring the SDK's config object."""

        def __init__(self, temperature, top_p, max_output_tokens):
            self.temperature, self.top_p = temperature, top_p
            self.max_output_tokens = max_output_tokens
|
|
|
|
|
async def test_gemini_integration():
    """Exercise the Gemini request-building logic end to end against mocks.

    Builds the system prompt, converts chat history into SDK ``Content``
    objects, issues a (mocked) streaming request, and prints each chunk.

    Returns:
        True once the mocked streaming round-trip completes.
    """
    # Feature flag and credentials as the application would configure them.
    USE_GEMINI = True
    gemini_api_key = "test_api_key"
    gemini_model = "gemini-2.5-flash-preview-05-20"

    # Minimal CV context injected into the system prompt below.
    full_cv_text = (
        "Tim Luka Horstmann is a Computer Scientist pursuing MSc in Data "
        "and AI at Institut Polytechnique de Paris."
    )

    # Mock SDK objects defined at module level in this file.
    gemini_client = MockClient(api_key=gemini_api_key)
    types = MockTypes()

    query = "What is your education?"
    history = []

    # Fixed: these literals had no placeholders, so the f-prefix was noise.
    print("Testing Gemini integration...")
    print(f"USE_GEMINI: {USE_GEMINI}")
    print(f"Query: {query}")

    current_date = datetime.now().strftime("%Y-%m-%d")

    system_prompt = (
        "You are Tim Luka Horstmann, a Computer Scientist. A user is asking you a question. Respond as yourself, using the first person, in a friendly and concise manner. "
        "For questions about your CV, base your answer *exclusively* on the provided CV information below and do not add any details not explicitly stated. "
        "For casual questions not covered by the CV, respond naturally but limit answers to general truths about yourself (e.g., your current location is Paris, France, or your field is AI) "
        "and say 'I don't have specific details to share about that' if pressed for specifics beyond the CV or FAQs. Do not invent facts, experiences, or opinions not supported by the CV or FAQs. "
        f"Today's date is {current_date}. "
        f"CV: {full_cv_text}"
    )

    messages = []

    # Replay prior turns; the Gemini API labels assistant turns as "model".
    for msg in history:
        role = "user" if msg["role"] == "user" else "model"
        messages.append(
            types.Content(role=role, parts=[types.Part.from_text(text=msg["content"])])
        )

    # On the first turn the system prompt is prepended to the user query,
    # since this mock SDK surface has no dedicated system-instruction slot.
    if not history:
        combined_query = f"{system_prompt}\n\nUser question: {query}"
    else:
        combined_query = query

    messages.append(
        types.Content(role="user", parts=[types.Part.from_text(text=combined_query)])
    )

    print(f"System prompt length: {len(system_prompt)}")
    print(f"Number of messages: {len(messages)}")

    # Issue the (mocked) streaming generation request.
    response = gemini_client.models.generate_content_stream(
        model=gemini_model,
        contents=messages,
        config=types.GenerateContentConfig(
            temperature=0.3,
            top_p=0.7,
            max_output_tokens=512,
        ),
    )

    # The mock yields synchronously, so a plain for-loop consumes the stream.
    print("Streaming response:")
    for chunk in response:
        if chunk.text:
            print(f"Chunk: {chunk.text}")

    print("✅ Gemini integration test completed successfully!")

    return True
|
|
|
if __name__ == "__main__": |
|
asyncio.run(test_gemini_integration()) |
|
|