XiangJinYu committed on
Commit
e6b999f
1 Parent(s): e11c55c

Upload 4 files

Browse files
Files changed (4) hide show
  1. README (3).md +12 -0
  2. app (1).py +91 -0
  3. gitattributes (2).txt +34 -0
  4. requirements (2).txt +6 -0
README (3).md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: "Chat PDF"
3
+ emoji: 📄
4
+ colorFrom: purple
5
+ colorTo: pink
6
+ sdk: gradio
7
+ sdk_version: 3.28.2
8
+ app_file: app.py
9
+ pinned: false
10
+ ---
11
+
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app (1).py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ import time
4
+ from langchain.document_loaders import OnlinePDFLoader
5
+ from langchain.text_splitter import CharacterTextSplitter
6
+ from langchain.llms import OpenAI
7
+ from langchain.embeddings import OpenAIEmbeddings
8
+ from langchain.vectorstores import Chroma
9
+ from langchain.chains import ConversationalRetrievalChain
10
+
11
def loading_pdf():
    """Return the transient status message shown while a PDF is being indexed."""
    status_message = "加载中...⏳"
    return status_message
13
+
14
def pdf_changes(pdf_doc, openai_api_key, chunk_size, chunk_overlap, temperature, return_source):
    """Index an uploaded PDF and initialise the global QA chain.

    Loads the PDF, splits it into chunks, embeds them into a Chroma vector
    store, and builds the module-global ``qa`` ConversationalRetrievalChain
    used later by ``infer``.

    Parameters:
        pdf_doc: uploaded file object from gr.File (must expose ``.name``).
        openai_api_key: OpenAI API key; exported via the environment.
        chunk_size / chunk_overlap: CharacterTextSplitter parameters.
        temperature: LLM sampling temperature.
        return_source: whether the chain returns source documents.

    Returns:
        A user-facing status string (Chinese UI text kept verbatim).
    """
    if not openai_api_key:
        return "你忘记了OpenAI API密钥🗝️"
    if pdf_doc is None:
        # Fix: clicking "load" before uploading a file previously crashed
        # with AttributeError on pdf_doc.name; report it instead.
        return "请先上传一个PDF📄"
    os.environ['OPENAI_API_KEY'] = openai_api_key
    loader = OnlinePDFLoader(pdf_doc.name)
    documents = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    texts = text_splitter.split_documents(documents)
    embeddings = OpenAIEmbeddings()
    db = Chroma.from_documents(texts, embeddings)
    retriever = db.as_retriever()
    # The chain is stored globally so bot()/infer() can reach it.
    global qa
    qa = ConversationalRetrievalChain.from_llm(
        llm=OpenAI(temperature=temperature, model="text-davinci-003", max_tokens=1000),
        retriever=retriever,
        return_source_documents=return_source)
    return "准备就绪🚀"
31
+
32
def add_text(history, text):
    """Append the user's message as a new (question, pending-answer) turn.

    Returns the extended history plus an empty string that clears the
    input textbox. The incoming history list is not mutated.
    """
    extended = history + [(text, None)]
    return extended, ""
35
+
36
def bot(history):
    """Stream the model's answer into the last chat turn, one character at a time.

    Yields the whole history after each appended character so the Gradio
    chatbot renders a typing effect (0.05 s per character).
    """
    answer = infer(history[-1][0], history)
    history[-1][1] = ""
    for ch in answer:
        history[-1][1] = history[-1][1] + ch
        time.sleep(0.05)
        yield history
43
+
44
def infer(question, history):
    """Ask the global ConversationalRetrievalChain ``qa`` a question.

    All completed turns (every entry except the last, in-progress one) are
    passed as chat history; returns the chain's answer text.
    """
    chat_history = [(human, ai) for human, ai in history[:-1]]
    result = qa({"question": question, "chat_history": chat_history})
    return result["answer"]
53
+
54
# --- Gradio UI definition and event wiring ---
# NOTE(review): the original indentation was lost in the diff scrape; the
# nesting below is reconstructed from Gradio Blocks semantics — confirm
# against the deployed Space.

# Page styling: constrain the main column and give it a light background.
css="""
#col-container {max-width: 700px; margin-left: auto; margin-right: auto; background-color: #f0f0f0;}
"""

# Static header markup (user-facing Chinese text kept verbatim).
title = """
<div style="text-align: center;max-width: 700px;">
<h1 style="color: #3399FF; font-family: 'Courier New', Courier, monospace;">Chat PDF[text-davinci-003]📚</h1>
<p style="text-align: center;color: #666666; font-family: 'Courier New', Courier, monospace;">上传你的PDF,并将其加载到向量库中,<br />
当一切准备就绪,你就可以开始提出关于pdf的问题了 🧐 <br />
此版本使用text-davinci-003作为LLM</p>
</div>
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)
        with gr.Column():
            # API key is masked; upload restricted to .pdf files.
            openai_api_key = gr.Textbox(label="你的OpenAI API密钥🔐", type="password")
            pdf_doc = gr.File(label="加载一个pdf📄", file_types=['.pdf'], type="file")
            with gr.Row():
                # Status readout plus the tunable indexing/LLM parameters
                # that feed pdf_changes().
                langchain_status = gr.Textbox(label="状态📊", placeholder="", interactive=False)
                chunk_size_slider = gr.Slider(minimum=100, maximum=2000, value=1000, step=100, label='块大小📏')
                chunk_overlap_slider = gr.Slider(minimum=0, maximum=1000, value=0, step=50, label='块重叠🔀')
                temperature_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.5, step=0.1, label='温度🌡️')
                return_source_checkbox = gr.Checkbox(label='返回源文件📑', default=False)
            load_pdf = gr.Button("加载PDF到LangChain🔄")
            chatbot = gr.Chatbot([], elem_id="chatbot").style(height=350)
            question = gr.Textbox(label="问题❓", placeholder="输入你的问题并按回车 ")
            submit_btn = gr.Button("发送消息📨")
    # Two handlers on the same click: the first flashes the "loading"
    # message, the second does the actual indexing; queue=False keeps
    # both immediate.
    load_pdf.click(loading_pdf, None, langchain_status, queue=False)
    load_pdf.click(pdf_changes, inputs=[pdf_doc, openai_api_key, chunk_size_slider, chunk_overlap_slider, temperature_slider, return_source_checkbox], outputs=[langchain_status], queue=False)
    # Enter key and send button share the same pipeline: append the user
    # turn via add_text, then stream the reply via the bot generator.
    question.submit(add_text, [chatbot, question], [chatbot, question]).then(
        bot, chatbot, chatbot
    )
    submit_btn.click(add_text, [chatbot, question], [chatbot, question]).then(
        bot, chatbot, chatbot)

demo.launch()
gitattributes (2).txt ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
requirements (2).txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ openai
2
+ tiktoken
3
+ chromadb
4
+ langchain
5
+ unstructured
6
+ unstructured[local-inference]