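"""Gradio chat app: answers questions with a LangChain RetrievalQAWithSourcesChain
over a persisted Chroma vector store, in the persona of "Sayno"."""
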
import gradio as gr
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain.vectorstores import Chroma

from constants import persist_directory

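# Re-open the Chroma index persisted at persist_directory (defined in the local
# constants module) and expose it as a retriever returning the 3 most similar chunks.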
embedding = OpenAIEmbeddings()
vector_store = Chroma(persist_directory=persist_directory, embedding_function=embedding)
retriever = vector_store.as_retriever(search_kwargs={"k": 3})

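# Chat model used for answering; temperature=0 minimizes randomness in the output.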
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

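# System prompt: sets the "Sayno" persona plus the answer-with-sources instructions.
# {summaries} is filled with the retrieved document chunks at query time.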
system_template = """Your name is Sayno. You act like a successful rich man. 
Never reveal that it is an AI. Use the following pieces of context to answer the users question shortly.
Given the following summaries of a long document and a question, create a final answer with references.
If you don't know the answer, just say that "I don't know", don't try to make up an answer.
----------------
{summaries}

You MUST answer in Korean and in Markdown format:"""

messages = [
    SystemMessagePromptTemplate.from_template(system_template),
    HumanMessagePromptTemplate.from_template("{question}"),
]

prompt = ChatPromptTemplate.from_messages(messages)

chain_type_kwargs = {"prompt": prompt}

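# "stuff" chain type packs all retrieved chunks into a single prompt;
# reduce_k_below_max_tokens drops chunks that would push the prompt over the
# chain's token limit.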
chain = RetrievalQAWithSourcesChain.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=retriever,
    return_source_documents=True,
    chain_type_kwargs=chain_type_kwargs,
    reduce_k_below_max_tokens=True,
    verbose=False,
)


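# Gradio callback: run the chain on the user's message, append the (question, answer)
# pair to the chat history, and clear the textbox by returning an empty string.
# Only result["answer"] is displayed; the returned source documents are not shown.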
def respond(message, chat_history):
    result = chain(message)
    bot_message = result["answer"]
    chat_history.append((message, bot_message))
    return "", chat_history


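# Gradio Blocks UI: a chatbot pane seeded with a greeting, an input textbox,
# and a button that resets the conversation.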
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 안녕하세요. 세이노와 대화해보세요.")
    initial_greeting = "안녕하세요!\n저는 세이노처럼 경험과 지식을 갖춘 인공지능 ChatGPT입니다. 세이노는 사업, 경영, 투자에 대한 전문가이며, 많은 사람들이 그의 조언을 참고하고 있습니다. 어떤 도움이 필요하신가요? 세이노와 관련된 질문이 있으시면 편안하게 물어보세요!"
    chatbot = gr.Chatbot(label="채팅창", value=[(None, initial_greeting)])  # label: "Chat window"
    msg = gr.Textbox(label="입력")  # label: "Input"
    clear = gr.Button("초기화")  # label: "Reset"

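    # Enter in the textbox sends the message; the reset button clears the chat pane.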
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch(debug=False)