File size: 1,180 Bytes
dc592b9
2c61b33
f68b669
2c61b33
dc592b9
 
 
 
2c61b33
 
 
 
 
dc592b9
2c61b33
 
dc592b9
2c61b33
dc592b9
 
 
 
 
2c61b33
 
 
dc592b9
 
 
2c61b33
 
dc592b9
 
 
 
 
 
 
 
 
2c61b33
dc592b9
20e295b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
import openai, os, json

# Base directory prepended (via os.path.join) to every prompt-template path
# passed to run_gemini; empty string means paths resolve relative to the CWD.
prompt_base_path = ""

# OpenAI-SDK client pointed at Google's OpenAI-compatible Gemini endpoint.
# Authentication comes from the GEMINI_API_KEY environment variable; if the
# variable is unset, api_key is None and requests will fail at call time.
client = openai.OpenAI(
    api_key=os.getenv("GEMINI_API_KEY"),
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
)


def run_gemini(
    target_prompt: str,
    prompt_in_path: str,
    llm_model: str = "gemini-2.0-flash-exp",
) -> str:
    """Query a Gemini model through the OpenAI-compatible endpoint.

    Loads a JSON prompt template from ``prompt_base_path``/``prompt_in_path``,
    sandwiches *target_prompt* between the template's user-prompt head and
    tail, and returns the model's reply text.

    Args:
        target_prompt: Text inserted between the template's head and tail
            user-prompt sections.
        prompt_in_path: Template path, joined onto ``prompt_base_path``.
            The JSON file must contain a ``system_prompt`` string and a
            ``user_prompt`` object with ``head`` and ``tail`` keys.
        llm_model: Gemini model identifier to invoke.

    Returns:
        The message content of the completion's first choice.
    """
    # Read the prompt template from disk.
    template_path = os.path.join(prompt_base_path, prompt_in_path)
    with open(template_path, "r", encoding="utf-8") as fp:
        template = json.load(fp)

    system_prompt = template["system_prompt"]
    head = template["user_prompt"]["head"]
    tail = template["user_prompt"]["tail"]

    # Assemble the user message: head, target text, tail — one per line.
    combined_text = "\n".join([head, target_prompt, tail])
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": [{"type": "text", "text": combined_text}]},
    ]

    # NOTE(review): uses the beta structured-output entry point without a
    # response_format argument — presumably intentional; confirm whether a
    # plain chat.completions.create would suffice.
    response = client.beta.chat.completions.parse(
        model=llm_model,
        messages=messages,
    )
    return response.choices[0].message.content