### llm_client.py

```python
import ollama
import numpy as np

from prompt import SYSTEM_PROMPT


class LlmClient:
    """Thin wrapper around the Ollama chat API."""

    def __init__(self, model_name: str):
        self.model_name = model_name

    def generate_text(self, prompt: str) -> str:
        # Send a system + user message pair and return the model's reply.
        response = ollama.chat(
            model=self.model_name,
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT.strip()},
                {"role": "user", "content": prompt.strip()},
            ],
        )
        return response["message"]["content"].strip()

    def get_pseudo_embedding(self, text: str) -> np.ndarray:
        # Not a real embedding: a simple placeholder that maps each ASCII
        # character to its code point, then zero-pads (or truncates) the
        # result to a fixed length of 768.
        vec = np.array([ord(c) for c in text if ord(c) < 128], dtype=np.float32)
        padded = np.zeros(768, dtype=np.float32)
        padded[: min(768, len(vec))] = vec[:768]
        return padded
```
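A minimal usage sketch follows. It assumes a local Ollama server is running and that a model has been pulled; the model name `llama3` and the example prompts are illustrative assumptions, not part of the original file. It also assumes `prompt.py` (which defines `SYSTEM_PROMPT`) is importable.

```python
# Usage sketch: assumes a local Ollama server with the "llama3" model
# pulled (hypothetical choice; any pulled chat model would work).
from llm_client import LlmClient

client = LlmClient(model_name="llama3")

# Text generation goes through ollama.chat under the hood.
answer = client.generate_text("Explain what zero-padding a vector does.")
print(answer)

# The pseudo-embedding is a fixed-length float32 vector of ASCII codes.
vec = client.get_pseudo_embedding("hello world")
print(vec.shape)  # (768,)
print(vec[:11])   # code points of "hello world", rest zero-padded
```

Because `get_pseudo_embedding` is purely character-based, it carries no semantic information and is only suitable for wiring up and testing the pipeline; swapping it for a real embedding endpoint would be the natural next step.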