import os

import gradio as gr
import torch
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
# HF_TOKEN comes from the Space secrets; the hub libraries also pick it up
# from the environment automatically, so it only matters for gated assets
token = os.environ["HF_TOKEN"]

# query encoder; must match the model used to pre-compute the dataset embeddings
ST = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")

# load the pre-embedded arXiv dataset and index its "embeddings" column with FAISS
art_dataset = load_dataset("hichri-mo/arxiver-1000", revision="embedded")
data = art_dataset["train"]
data = data.add_faiss_index("embeddings")
model_id = "Qwen/Qwen2.5-3B-Instruct"

# use 4-bit quantization to lower GPU memory usage
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    quantization_config=bnb_config,
)
# stop on either the model's EOS token or "<|eot_id|>"; the latter is a Llama-3
# token that may not exist in the Qwen vocabulary, in which case it comes back
# as None and is handled before generation below
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]
SYS_PROMPT = """You are an assistant for answering questions.
You are given the extracted parts of a long document and a question. Provide a conversational answer.
If you don't know the answer, just say "I do not know." Don't make up an answer."""
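# NOTE: `search` is called by `rag_chatbot` below but was missing from this file.
# A minimal sketch is added here, assuming the FAISS index built above on the
# "embeddings" column and queries encoded with the same SentenceTransformer (ST).
def search(query: str, k: int = 3):
    """Embed the query and return the scores and k nearest documents from the FAISS index."""
    embedded_query = ST.encode(query)
    scores, retrieved_examples = data.get_nearest_examples(
        "embeddings", embedded_query, k=k
    )
    return scores, retrieved_examples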
def format_prompt(prompt, retrieved_documents, k):
    """Build the final prompt from the user question and the k retrieved documents."""
    PROMPT = f"Question: {prompt}\nContext:\n"
    for idx in range(k):
        PROMPT += f"{retrieved_documents['markdown'][idx]}\n"
    return PROMPT
def generate(formatted_prompt):
    # crude character-level truncation of the context to avoid GPU OOM
    formatted_prompt = formatted_prompt[:2000]
    messages = [
        {"role": "system", "content": SYS_PROMPT},
        {"role": "user", "content": formatted_prompt},
    ]
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)
    # prefer "<|eot_id|>" as the stop token when it exists in the vocabulary;
    # otherwise fall back to the model's own EOS token
    eos_token_id = terminators[0]
    if terminators[1] is not None:
        eos_token_id = terminators[1]
    outputs = model.generate(
        input_ids,
        max_new_tokens=1024,
        eos_token_id=eos_token_id,
        do_sample=True,
        temperature=0.6,
        top_p=0.9,
    )
    # strip the prompt tokens and decode only the newly generated part
    response = outputs[0][input_ids.shape[-1]:]
    return tokenizer.decode(response, skip_special_tokens=True)
def rag_chatbot(prompt: str, k: int = 2):
    scores, retrieved_documents = search(prompt, k)
    formatted_prompt = format_prompt(prompt, retrieved_documents, k)
    return generate(formatted_prompt)

def rag_chatbot_interface(prompt, k):
    # cast the slider value to int so range(k) works even if Gradio passes a float
    return rag_chatbot(prompt, int(k))
iface = gr.Interface(
    fn=rag_chatbot_interface,
    inputs=[
        gr.Textbox(label="Enter your question"),
        gr.Slider(minimum=1, maximum=10, step=1, value=2, label="Number of documents to retrieve"),
    ],
    outputs=gr.Textbox(label="Response"),
    title="Chatbot with RAG",
    description="Ask questions and get answers based on retrieved documents.",
)

iface.launch()