File size: 3,224 Bytes
beb32ed
 
 
 
 
 
 
b5bfffc
03cbf3b
 
 
 
 
 
beb32ed
b5bfffc
beb32ed
b5bfffc
beb32ed
b5bfffc
 
beb32ed
b5bfffc
 
 
 
 
 
 
 
 
beb32ed
b5bfffc
ed67987
a02c4f8
03cbf3b
 
ed67987
 
 
 
 
 
 
03cbf3b
e37fc3c
5b41ebf
b5bfffc
03cbf3b
e37fc3c
 
03cbf3b
 
ed67987
b5bfffc
5c68cc7
03cbf3b
ed67987
b5bfffc
ed67987
b5bfffc
 
 
 
beb32ed
 
 
 
b5bfffc
beb32ed
03cbf3b
 
 
b5bfffc
03cbf3b
 
 
 
b5bfffc
70d13a5
b5bfffc
03cbf3b
 
b5bfffc
03cbf3b
 
 
ed67987
beb32ed
 
 
 
 
 
4b45ddd
 
03cbf3b
 
 
 
beb32ed
 
 
 
 
 
 
b5bfffc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
"""Answer questions about my resume."""

# %% IMPORTS

import logging

import gradio as gr
from openai import OpenAI
from openai.types.chat import (
    ChatCompletionAssistantMessageParam,
    ChatCompletionMessageParam,
    ChatCompletionSystemMessageParam,
    ChatCompletionUserMessageParam,
)

# %% CONFIGS

# %% - Models

MODEL_NAME = "gpt-3.5-turbo"  # chat completion model
MODEL_TEMPERATURE = 0.0  # deterministic sampling for reproducible answers

# %% - Prompts

PROMPT_INSTRUCTIONS = """
You are Fmind AI Assistant, specialized in providing information from Médéric Hurier's (known as Fmind) resume. Your responses should be succinct and maintain a professional tone. If the request deviates from answering Médéric's resume, politely decline to answer the question.

Find more information about Médéric Hurier resume below (markdown format):
"""
# Read the resume context once at import time. The context manager closes the
# handle (the previous `open(...).read()` leaked it) and the encoding is pinned
# to UTF-8 so accented characters (e.g. "Médéric") decode the same everywhere.
with open("files/linkedin.md", encoding="utf-8") as _context_file:
    PROMPT_CONTEXT = _context_file.read()
# Full system prompt: behavioral instructions followed by the resume markdown.
PROMPT_SYSTEM = PROMPT_INSTRUCTIONS + PROMPT_CONTEXT

# %% - Interfaces

INTERFACE_THEME = "soft"
INTERFACE_TITLE = "Fmind AI Assistant"
# Sample questions shown in the chat UI (the last one tests off-topic refusal).
INTERFACE_EXAMPLES = [
    "Who is Médéric Hurier (Fmind)?",
    "Is Fmind open to new opportunities?",
    "Can you share details about Médéric PhD?",
    "Elaborate on Médéric current work position",
    "Describe his proficiency with Python programming",
    "What is the answer to life, the universe, and everything?",
]
INTERFACE_DESCRIPTION = (
    "<center>"
    "Visit my website: <a href='https://fmind.dev'>https://fmind.dev</a>"
    " - Médéric HURIER (Fmind)"
    " - Freelancer: AI/FM/MLOps Engineer | Data Scientist | MLOps Community Organizer | MLflow Ambassador | Hacker | PhD"
    "</center>"
)
INTERFACE_CACHE_EXAMPLES = "lazy"  # cache example answers on first use
INTERFACE_CONCURRENCY_LIMIT = None  # no cap on concurrent requests

# %% CLIENTS

# OpenAI client; credentials are taken from the environment (OPENAI_API_KEY).
client = OpenAI()

# %% LOGGING

logging.basicConfig(
    level=logging.INFO,
    format="[%(asctime)s][%(levelname)s] %(message)s",
)

# %% FUNCTIONS


def answer(message: str, history: list[tuple[str, str]]) -> str:
    """Answer questions about my resume.

    Args:
        message: latest user message from the chat UI.
        history: prior (user, assistant) turn pairs of the conversation.

    Returns:
        The assistant's reply text, or an apology if the API returns no content.
    """
    # Assemble the transcript: system prompt, then past turns, then the new message.
    messages: list[ChatCompletionMessageParam] = [
        ChatCompletionSystemMessageParam(role="system", content=PROMPT_SYSTEM)
    ]
    for user_text, assistant_text in history:
        messages.append(ChatCompletionUserMessageParam(role="user", content=user_text))
        messages.append(
            ChatCompletionAssistantMessageParam(role="assistant", content=assistant_text)
        )
    messages.append(ChatCompletionUserMessageParam(role="user", content=message))
    # Query the chat completions endpoint and log token usage for monitoring.
    response = client.chat.completions.create(
        model=MODEL_NAME, messages=messages, temperature=MODEL_TEMPERATURE
    )
    logging.info("Response: %s", response.usage)
    # The API may return a message with no content; fall back to an apology.
    content = response.choices[0].message.content
    if content is None:
        logging.warning("Response content is None: %s", response)
        return "[Internal Error] Sorry, I don't have an answer for that."
    return content


# %% INTERFACES

# Chat UI wired to the `answer` callback. Passing None for the clear/retry/undo
# buttons removes them from the interface.
_chat_options = {
    "theme": INTERFACE_THEME,
    "title": INTERFACE_TITLE,
    "examples": INTERFACE_EXAMPLES,
    "description": INTERFACE_DESCRIPTION,
    "cache_examples": INTERFACE_CACHE_EXAMPLES,
    "concurrency_limit": INTERFACE_CONCURRENCY_LIMIT,
    "clear_btn": None,
    "retry_btn": None,
    "undo_btn": None,
}
interface = gr.ChatInterface(fn=answer, **_chat_options)


if __name__ == "__main__":
    # Start the local web server when executed as a script.
    interface.launch()