import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain_huggingface import HuggingFaceEmbeddings, HuggingFacePipeline
from langchain_community.vectorstores import FAISS
from langchain.chains import RetrievalQA
import gradio as gr
import spaces


# Load TinyLlama model
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)
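# NOTE: the model is left on CPU here; on a ZeroGPU Space it still needs to be
# moved to CUDA (e.g. model.to("cuda")) for the @spaces.GPU window below to
# actually run generation on the GPU.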

# Create a text generation pipeline
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.7,
    top_p=0.95,
    top_k=40,
    repetition_penalty=1.1,
    return_full_text=False,  # return only the generated answer, not the echoed prompt
)
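# The sampling settings above trade determinism for variety; lowering the
# temperature (or setting do_sample=False) gives more repeatable answers.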

# Wrap the pipeline in a LangChain HuggingFacePipeline
llm = HuggingFacePipeline(pipeline=pipe)

# Load embeddings
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
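# NOTE: this must be the same embedding model that was used to build the FAISS
# index; a different model produces vectors the index cannot search meaningfully.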

# Load the FAISS index
db_FAISS = FAISS.load_local("/home/user/app/", embeddings, allow_dangerous_deserialization=True)
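# allow_dangerous_deserialization is required because load_local unpickles the
# stored index metadata; only enable it for index files you created yourself.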

# Create a RetrievalQA chain
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=db_FAISS.as_retriever(search_kwargs={"k": 3}),
    return_source_documents=True
)
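# The "stuff" chain pastes all k retrieved chunks into a single prompt, so k=3
# keeps the prompt small enough for TinyLlama's 2048-token context window
# (assuming modest chunk sizes).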

print("fuck14")
@spaces.GPU
def query_documents(query):
    result = qa_chain.invoke({"query": query})  # .invoke() replaces the deprecated __call__ usage
    answer = result['result']
    sources = [doc.metadata for doc in result['source_documents']]
    return answer, sources
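# Example (hypothetical question; useful for testing the chain without the UI):
#   answer, sources = query_documents("What is the refund policy?")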

# Gradio interface
def gradio_interface(query):
    answer, sources = query_documents(query)
    source_text = "\n\nSources:\n" + "\n".join(
        f"Source: {s.get('source', 'Unknown')}, Page: {s.get('page', 'Unknown')}"
        for s in sources
    )
    return answer + source_text

iface = gr.Interface(
    fn=gradio_interface,
    inputs="text",
    outputs="text",
    title="Document Q&A with TinyLlama",
    description="Ask questions about your documents"
)

# Launch the Gradio app (Hugging Face Spaces runs this as the entry point)
iface.launch()