anpigon JUNGU committed on
Commit
52a6b99
0 Parent(s):

Duplicate from JUNGU/talktosayno

Browse files

Co-authored-by: HAN JUNGU <JUNGU@users.noreply.huggingface.co>

Files changed (5) hide show
  1. .gitattributes +35 -0
  2. README.md +14 -0
  3. app.py +110 -0
  4. docs.pdf +3 -0
  5. requirements.txt +6 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ docs.pdf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Talktosayno
3
+ emoji: 📉
4
+ colorFrom: green
5
+ colorTo: pink
6
+ sdk: gradio
7
+ sdk_version: 3.34.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: openrail
11
+ duplicated_from: JUNGU/talktosayno
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings.cohere import CohereEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch
from langchain.vectorstores import Chroma
from PyPDF2 import PdfWriter
import gradio as gr
import os
from dotenv import load_dotenv
import openai

load_dotenv()
# The OpenAI key is stored as a Space *secret* named 'my_secret'
# (uploading a .env file would expose it in the repo). Fail fast with a
# clear message instead of a bare KeyError when the secret is missing.
if "my_secret" not in os.environ:
    raise RuntimeError(
        "Environment variable 'my_secret' (OpenAI API key) is not set. "
        "Add it as a secret in the Space settings."
    )
os.environ["OPENAI_API_KEY"] = os.environ['my_secret']

# Load the source PDF and split it into ~800-character chunks
# (no overlap) for embedding.
loader = PyPDFLoader("/home/user/app/docs.pdf")
documents = loader.load()

text_splitter = CharacterTextSplitter(chunk_size=800, chunk_overlap=0)
texts = text_splitter.split_documents(documents)

# Embed every chunk with OpenAI embeddings and index them in an in-memory
# Chroma store; each query retrieves the 2 most similar chunks.
embeddings = OpenAIEmbeddings()
vector_store = Chroma.from_documents(texts, embeddings)
retriever = vector_store.as_retriever(search_kwargs={"k": 2})
31
+
32
# Bring RetrievalQAWithSourcesChain into scope for the chain built further
# below. (ChatOpenAI is already imported at the top of the file; the
# notebook-style duplicate import is kept out.)
from langchain.chains import RetrievalQAWithSourcesChain

# NOTE(review): the original script constructed an `llm` and a first
# RetrievalQAWithSourcesChain here, then immediately rebound both names —
# with the custom prompt — before any use. That construction was dead code
# (one wasted chain object per startup) and has been removed; only the
# import it introduced is still needed.
42
+
43
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)

# System instructions for the QA chain: answer briefly from the retrieved
# summaries, always cite "SOURCES" (in capitals), say "I don't know" rather
# than invent an answer, and reply in Korean using Markdown.
system_template = """Use the following pieces of context to answer the users question shortly.
Given the following summaries of a long document and a question, create a final answer with references ("SOURCES"), use "SOURCES" in capital letters regardless of the number of sources.
If you don't know the answer, just say that "I don't know", don't try to make up an answer.
----------------
{summaries}

You MUST answer in Korean and in Markdown format:"""

# Pair the fixed system message with the raw user question.
messages = [
    SystemMessagePromptTemplate.from_template(system_template),
    HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
63
+
64
# ChatOpenAI and RetrievalQAWithSourcesChain are already imported earlier in
# this file; the duplicate imports that sat here have been removed.

# Inject the custom chat prompt into the "stuff" chain.
chain_type_kwargs = {"prompt": prompt}

llm = ChatOpenAI(model_name="gpt-4", temperature=0) # Modify model_name if you have access to GPT-4

# The QA chain actually used by the app: "stuff" the retrieved chunks into
# the prompt's {summaries} slot and return the source documents alongside
# the answer so the UI can cite them.
chain = RetrievalQAWithSourcesChain.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=retriever,
    return_source_documents=True,
    chain_type_kwargs=chain_type_kwargs,
)
78
+
79
# Import-time smoke test: run one sample query ("What is a happy life?")
# through the chain and print, for each retrieved chunk, a one-line snippet
# plus its source file and page number.
# NOTE(review): this costs one LLM call every time the Space starts —
# consider removing it or guarding it once the app is stable.
query = "행복한 인생이란?"
result = chain(query)


for doc in result['source_documents']:
    # First 100 characters of the chunk, newlines flattened to spaces.
    print('내용 : ' + doc.page_content[0:100].replace('\n', ' '))
    print('파일 : ' + doc.metadata['source'])
    print('페이지 : ' + str(doc.metadata['page']))
87
+
88
+
89
def respond(message, chat_history):
    """Handle one chat turn for the Gradio UI.

    Runs *message* through the QA chain, appends numbered source citations
    ("[n] file(page) ") to the answer, records the turn in *chat_history*,
    and returns ("", chat_history) so the input textbox is cleared while the
    chatbot component receives the updated history.
    """
    result = chain(message)

    # Build the citation suffix from the retrieved source documents.
    citations = [
        '[' + str(idx + 1) + '] ' + doc.metadata['source']
        + '(' + str(doc.metadata['page']) + ') '
        for idx, doc in enumerate(result['source_documents'])
    ]
    bot_message = result['answer'] + ''.join(citations)

    chat_history.append((message, bot_message))
    return "", chat_history
101
+
102
# Build and launch the chat UI.
with gr.Blocks(theme='gstaff/sketch') as demo:
    gr.Markdown("# 안녕하세요. 세이노와 대화해보세요.")  # Header: "Hello. Try talking with Say-no."
    chatbot = gr.Chatbot(label="채팅창")  # Chat-history pane (label "chat window").
    msg = gr.Textbox(label="입력")  # User input textbox (label "input").
    clear = gr.Button("초기화")  # Reset button (label "clear").

    # Submitting the textbox calls respond(); it clears the box and updates the chat.
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    # The clear button wipes the chatbot component's history.
    clear.click(lambda: None, None, chatbot, queue=False)
demo.launch(debug=True)  # Start the app (debug=True surfaces errors in the log).
docs.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dab840d01bd8582e930da5ccb74c032279e832ed02f7f938953e7f77730d1ad2
3
+ size 4232031
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ openai
2
+ langchain
3
+ pypdf
4
+ chromadb
5
+ tiktoken
6
+ PyPDF2