# tests/vsp/llm/test_prompt.py
# Originally added in "Add a test for main.py (#3)" (commit e261f25).
from unittest.mock import AsyncMock, MagicMock
import pytest
from vsp.llm.llm_service import LLMService
from vsp.llm.prompt import Prompt
from vsp.llm.prompt_text import PromptText
@pytest.fixture
def mock_llm_service():
    """Provide an async LLMService mock whose ``invoke`` yields a canned reply."""
    mocked = AsyncMock(spec=LLMService)
    mocked.invoke.return_value = "Mocked response"
    return mocked
def test_prompt_initialization(mock_llm_service):
    """A Prompt should store the user prompt it was constructed with."""
    template = PromptText("Hello, {name}!")
    built = Prompt(mock_llm_service, user_prompt=template)
    # NOTE(review): reaches into the private attribute; acceptable for a white-box test.
    assert built._user_prompt == template
def test_prompt_update_prompts(mock_llm_service):
    """upsert_inputs should substitute template placeholders in the user prompt."""
    built = Prompt(mock_llm_service, user_prompt=PromptText("Hello, {name}!"))
    built.upsert_inputs({"name": "Alice"})
    rendered = built._user_prompt.get_prompt()
    assert rendered == "Hello, Alice!"
@pytest.mark.asyncio
async def test_prompt_evaluate(mock_llm_service):
    """evaluate() should await the LLM service with the rendered prompt and defaults.

    Uses ``assert_awaited_once_with`` (not ``assert_called_once_with``): the
    service mock is an AsyncMock, and only the awaited-form assertion proves
    the coroutine was actually awaited rather than merely created.
    """
    user_prompt = PromptText("Hello, {name}!")
    prompt = Prompt(mock_llm_service, user_prompt=user_prompt)
    prompt.upsert_inputs({"name": "Bob"})
    result = await prompt.evaluate()
    assert result == "Mocked response"
    mock_llm_service.invoke.assert_awaited_once_with(
        user_prompt="Hello, Bob!", system_prompt=None, partial_assistant_prompt=None, max_tokens=1000, temperature=0.0
    )
def test_prompt_missing_prompts():
    """Constructing a Prompt without any prompt text must raise ValueError."""
    service_stub = MagicMock()
    with pytest.raises(ValueError):
        Prompt(service_stub)
@pytest.mark.asyncio
async def test_prompt_with_all_parameters(mock_llm_service):
    """All three prompt parts plus tuning knobs should be forwarded to invoke().

    Uses ``assert_awaited_once_with`` so the test fails if the AsyncMock's
    coroutine is created but never awaited (``assert_called_once_with`` would
    pass in that case).
    """
    user_prompt = PromptText("User: {user_input}")
    system_prompt = PromptText("System: {system_input}")
    assistant_prompt = PromptText("Assistant: {assistant_input}")
    prompt = Prompt(
        mock_llm_service,
        user_prompt=user_prompt,
        system_prompt=system_prompt,
        partial_assistant_prompt=assistant_prompt,
        max_tokens=500,
        temperature=0.7,
    )
    prompt.upsert_inputs({"user_input": "Hello", "system_input": "Be polite", "assistant_input": "Greetings"})
    await prompt.evaluate()
    mock_llm_service.invoke.assert_awaited_once_with(
        user_prompt="User: Hello",
        system_prompt="System: Be polite",
        partial_assistant_prompt="Assistant: Greetings",
        max_tokens=500,
        temperature=0.7,
    )