Spaces:
Runtime error
Create app.py
app.py
ADDED
@@ -0,0 +1,119 @@
import os
import streamlit as st
import torch
from auto_gptq import AutoGPTQForCausalLM
from pdf2image import convert_from_path
from transformers import AutoTokenizer, TextStreamer, pipeline
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_core.prompts import PromptTemplate
from langchain.chains import RetrievalQA

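# pdf2image relies on poppler-utils, so install it before rendering any PDF pages.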
os.system('sudo apt-get install -y poppler-utils')

DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"

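# Render page previews of the medical book (meta_images is not used again below)
# and load its text with PyPDFLoader.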
meta_images = convert_from_path("Medical_Book.pdf", dpi=88)

loader = PyPDFLoader("Medical_Book.pdf")
docs = loader.load()

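# Split the pages into 1024-character chunks with 64-character overlap, embed them
# with Instructor embeddings, and index them in a persistent Chroma vector store.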
embeddings = HuggingFaceInstructEmbeddings(
    model_name="hkunlp/instructor-large", model_kwargs={"device": DEVICE}
)

text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=64)
texts = text_splitter.split_documents(docs)

db = Chroma.from_documents(texts, embeddings, persist_directory="db")

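# Load the tokenizer and the 4-bit GPTQ-quantized Llama 2 13B chat model.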
model_name_or_path = "TheBloke/Llama-2-13B-chat-GPTQ"
model_basename = "model"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)

model = AutoGPTQForCausalLM.from_quantized(
    model_name_or_path,
    revision="gptq-4bit-128g-actorder_True",
    model_basename=model_basename,
    use_safetensors=True,
    trust_remote_code=True,
    inject_fused_attention=False,
    device=DEVICE,
    quantize_config=None,
)

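# Default Llama 2 chat system prompt and a helper that wraps prompts in the
# [INST] <<SYS>> ... <</SYS>> chat template.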
DEFAULT_SYSTEM_PROMPT = """
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
""".strip()


def generate_prompt(prompt: str, system_prompt: str = DEFAULT_SYSTEM_PROMPT) -> str:
    return f"""
[INST] <<SYS>>
{system_prompt}
<</SYS>>

{prompt} [/INST]
""".strip()

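# Stream tokens as they are generated and wrap the model in a transformers
# text-generation pipeline so LangChain can call it as an LLM.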
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

text_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=1024,
    temperature=0,
    top_p=0.95,
    repetition_penalty=1.15,
    streamer=streamer,
)

llm = HuggingFacePipeline(pipeline=text_pipeline, model_kwargs={"temperature": 0})

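# RAG prompt: the retrieved context and the user's question are substituted into
# the chat template at query time.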
SYSTEM_PROMPT = "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer."

template = generate_prompt(
    """
{context}

Question: {question}
""",
    system_prompt=SYSTEM_PROMPT,
)

prompt = PromptTemplate(template=template, input_variables=["context", "question"])

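# Retrieval QA chain: "stuff" the top-2 retrieved chunks into the prompt and
# return the source documents alongside the answer.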
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=db.as_retriever(search_kwargs={"k": 2}),
    return_source_documents=True,
    chain_type_kwargs={"prompt": prompt},
)

# result = qa_chain("what is Doppler ultrasonography?")
# print(result["source_documents"][0].page_content)
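# Streamlit UI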
st.title("Medical Chatbot")

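# Keep the conversation history across Streamlit reruns.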
if "history" not in st.session_state:
    st.session_state.history = []

user_input = st.text_input("Ask a question:", key="input")

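# RetrievalQA expects its input under the "query" key; the answer is in result["result"].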
if user_input:
    result = qa_chain({"query": user_input})
    st.session_state.history.append({"question": user_input, "answer": result["result"]})

for entry in st.session_state.history:
    st.write(f"**Question:** {entry['question']}")
    st.write(f"**Answer:** {entry['answer']}")

if st.button("Clear History"):
    st.session_state.history = []