from langchain_community.document_loaders import HuggingFaceDatasetLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter

from transformers import AutoTokenizer, pipeline
import gradio as gr
import os
from dotenv import load_dotenv, find_dotenv

_ = load_dotenv(find_dotenv()) # read local .env file
hf_api_key = os.environ['HF_TOKEN']

# Load the data
loader = HuggingFaceDatasetLoader(
    path="databricks/databricks-dolly-15k",
    page_content_column="context",
    use_auth_token=hf_api_key,
)
data = loader.load()
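# The loader turns each record's "context" field into a Document's
# page_content; the remaining dataset columns go into its metadata.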

# Document Transformers
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)
docs = text_splitter.split_documents(data)
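# Chunks are ~1,000 characters with 150 characters of overlap, so text
# spanning a chunk boundary still appears intact in one of the chunks.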

# Text Embedding
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-l6-v2",
    model_kwargs={'device':'cpu'},
    encode_kwargs={'normalize_embeddings': False}
)
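# all-MiniLM-L6-v2 embeds each chunk as a 384-dimensional vector and is
# small enough to run comfortably on CPU.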

# Set up Vector Stores
db = FAISS.from_documents(docs, embeddings)
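
# Optional sketch (not part of the original app): persist the index so
# the embedding step can be skipped on restart. Newer langchain versions
# may also require allow_dangerous_deserialization=True on load_local.
# db.save_local("faiss_index")
# db = FAISS.load_local("faiss_index", embeddings)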

# Set up retrievers
retriever = db.as_retriever(search_kwargs={"k": 4})
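# k=4: each query returns the four chunks nearest in embedding space.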

# Load the tokenizer associated with the specified model;
# model_max_length caps inputs at dynamic_tinybert's 512-token window
# (padding/truncation are per-call options, not from_pretrained kwargs)
tokenizer = AutoTokenizer.from_pretrained("Intel/dynamic_tinybert", model_max_length=512)

# Define a question-answering pipeline using the model and tokenizer
question_answerer = pipeline(
    "question-answering",
    model="Intel/dynamic_tinybert",
    tokenizer=tokenizer,
    return_tensors='pt'
)
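# dynamic_tinybert is an extractive (SQuAD-style) QA model: it selects
# an answer span from the supplied context rather than generating text.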

def generate(question):
    # Retrieve the chunks nearest to the question, then answer
    # extractively from the top match so the context stays within
    # the model's 512-token limit
    docs = retriever.get_relevant_documents(question)
    context = docs[0].page_content
    squad_ex = question_answerer(question=question, context=context)
    return squad_ex['answer']


def respond(message, chat_history):
    bot_message = generate(message)
    chat_history.append((message, bot_message))
    return "", chat_history  # empty string clears the input textbox

# Set up the chat interface
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=240)  # just to fit the notebook
    msg = gr.Textbox(label="Ask away")
    btn = gr.Button("Submit")
    clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")

    btn.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
    msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])  # press Enter to submit

demo.queue().launch()  # queue requests so concurrent users are served in order