alexkueck committed on
Commit
72aebc1
1 Parent(s): 87b0225

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -19
app.py CHANGED
@@ -1,8 +1,10 @@
 
1
  import gradio as gr
2
- from langchain.chains import RagChain
3
  from langchain.vectorstores import Chroma
4
  from transformers import RagTokenizer, RagSequenceForGeneration
5
  from sentence_transformers import SentenceTransformer
 
 
6
 
7
  #Konstanten
8
  ANTI_BOT_PW = os.getenv("CORRECT_VALIDATE")
@@ -20,11 +22,16 @@ model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use
20
  # Verbindung zur Chroma DB und Laden der Dokumente
21
  chroma_db = Chroma(embedding_model=embedding_model, persist_directory = PATH_WORK + CHROMA_DIR)
22
 
 
 
 
 
 
23
  # Erstellen eines eigenen Retrievers mit Chroma DB und Embeddings
24
- retriever = chroma_db.as_retriever()
25
 
26
  # Erstellung der RAG-Kette mit dem benutzerdefinierten Retriever
27
- rag_chain = RagChain(model=model, retriever=retriever, tokenizer=tokenizer, vectorstore=chroma_db)
28
  #############################################
29
 
30
 
@@ -45,23 +52,25 @@ def document_retrieval_chroma2():
45
 
46
 
47
 
48
- def get_rag_response(prompt):
49
  global rag_chain
50
- #rag-chain nutzen, um Antwort zu generieren
51
- result = rag_chain({"Frage: " : prompt})
52
-
53
- #relevante Dokumente extrahieren
54
- docs = result['docs']
55
- passages = [doc['text'] for doc in docs]
56
- links = [doc['url'] for doc in docs]
57
-
58
- #Antwort generieren
59
- answer = result['output']
60
- response = {
61
- "answer" : answer,
62
- "documents" : [{"link" : link, "passage" : passage} for link, passage in zip(links, passages)]
63
- }
64
- return response
 
 
65
 
66
 
67
  def chatbot_response (user_input, chat_history=[]):
 
1
+ import os
2
  import gradio as gr
 
3
  from langchain.vectorstores import Chroma
4
  from transformers import RagTokenizer, RagSequenceForGeneration
5
  from sentence_transformers import SentenceTransformer
6
+ from langchain.chains.question_answering import load_qa_chain
7
+ from langchain.llms import HuggingFaceLLM
8
 
9
  #Konstanten
10
  ANTI_BOT_PW = os.getenv("CORRECT_VALIDATE")
 
22
  # Verbindung zur Chroma DB und Laden der Dokumente
23
  chroma_db = Chroma(embedding_model=embedding_model, persist_directory = PATH_WORK + CHROMA_DIR)
24
 
25
+ # Erstellung eines HuggingFaceLLM Modells
26
+ llm = HuggingFaceLLM(model=model, tokenizer=tokenizer)
27
+
28
+
29
+
30
  # Erstellen eines eigenen Retrievers mit Chroma DB und Embeddings
31
+ #retriever = chroma_db.as_retriever()
32
 
33
  # Erstellung der RAG-Kette mit dem benutzerdefinierten Retriever
34
+ #rag_chain = RagChain(model=model, retriever=retriever, tokenizer=tokenizer, vectorstore=chroma_db)
35
  #############################################
36
 
37
 
 
52
 
53
 
54
 
55
def get_rag_response(question):
    """Retrieve relevant passages from the Chroma DB and generate an answer.

    Parameters:
        question (str): the user's natural-language question.

    Returns:
        dict: {"answer": <generated answer text>,
               "documents": [{"link": <source url>, "passage": <text>}, ...]}
    """
    # NOTE(review): verify the search call signature against the installed
    # LangChain version — Chroma.search typically takes (query, search_type);
    # similarity_search(question, k=5) may be the intended call.
    docs = chroma_db.search(question, top_k=5)

    # LangChain vector stores return Document objects, not dicts:
    # doc['text'] / doc.get('url') would raise TypeError. Read the text via
    # .page_content and the source URL from .metadata instead.
    passages = [doc.page_content for doc in docs]
    links = [doc.metadata.get('url', 'No URL available') for doc in docs]

    # Generate the answer with the LLM wrapper.
    # NOTE(review): LangChain LLM objects are called with a single prompt
    # string — confirm that passing (question, docs) matches the actual
    # llm helper defined above; the passages may need to be folded into
    # the prompt explicitly.
    answer = llm(question, docs)

    # Pair each source link with its passage for the UI.
    return {
        "answer": answer,
        "documents": [
            {"link": link, "passage": passage}
            for link, passage in zip(links, passages)
        ],
    }
73
+
74
 
75
 
76
  def chatbot_response (user_input, chat_history=[]):