Raghuan committed on
Commit
d583044
·
verified ·
1 Parent(s): 1f8542e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -18
app.py CHANGED
@@ -19,7 +19,9 @@ def extract_relevant_context(context, max_length=300):
19
 
20
  # Function to format the final output
21
  def format_response(query, answer):
22
- return f"Query: {query}\n\nAnswer: {answer}"
 
 
23
 
24
  # Streamlit UI setup
25
  st.title("Chatbot Interface")
@@ -49,36 +51,20 @@ if query and dataset:
49
  # Search in the vector database for the closest matches
50
  distances, indices = index.search(query_embedding, k=1) # Limit to the top 1 result
51
 
52
- # Debug: Display the index search results
53
- st.write("Debug: Index search results:", indices, distances)
54
-
55
  # Retrieve the most relevant context from the metadata
56
  retrieved_context = metadata[indices[0][0]] # Get the top 1 context
57
 
58
- # Debug: Display the retrieved context
59
- st.write("Debug: Retrieved context:", retrieved_context)
60
-
61
  # Extract the most relevant part of the context
62
  relevant_context = extract_relevant_context(retrieved_context)
63
 
64
- # Debug: Display the relevant context
65
- st.write("Debug: Relevant context used in prompt:", relevant_context)
66
-
67
  # Construct a concise prompt
68
- prompt = f"Context: {relevant_context}\n\nQuery: {query}\n\nAnswer concisely:"
69
-
70
- # Debug: Display the final prompt
71
- st.write("Debug: Prompt sent to model:", prompt)
72
-
73
  result = query_huggingface_model(prompt)
74
 
75
  # Handling the list result and clean up output
76
  if isinstance(result, list) and len(result) > 0:
77
  generated_text = result[0].get("generated_text", "No response from model")
78
 
79
- # Debug: Display the generated text from the model
80
- st.write("Debug: Generated text from model:", generated_text)
81
-
82
  # Format the output to show only the query and the answer
83
  final_output = format_response(query, generated_text.strip())
84
 
 
19
 
20
  # Function to format the final output
21
def format_response(query, answer):
    """Return the user-facing reply string for *query*.

    The model occasionally echoes additional "Query: ..." turns after its
    answer; everything from the first occurrence of the word "Query" onward
    is discarded so only the first answer is shown.
    """
    # partition() yields the text before the first "Query" (or the whole
    # string when the word never appears) — same result as split("Query")[0].
    answer_head, _, _ = answer.partition("Query")
    return f"Query: {query}\n\nAnswer: {answer_head.strip()}"
25
 
26
  # Streamlit UI setup
27
  st.title("Chatbot Interface")
 
51
  # Search in the vector database for the closest matches
52
  distances, indices = index.search(query_embedding, k=1) # Limit to the top 1 result
53
 
 
 
 
54
  # Retrieve the most relevant context from the metadata
55
  retrieved_context = metadata[indices[0][0]] # Get the top 1 context
56
 
 
 
 
57
  # Extract the most relevant part of the context
58
  relevant_context = extract_relevant_context(retrieved_context)
59
 
 
 
 
60
  # Construct a concise prompt
61
+ prompt = f"Context: {relevant_context}\n\nQuery: {query}\n\nAnswer only the query concisely:"
 
 
 
 
62
  result = query_huggingface_model(prompt)
63
 
64
  # Handling the list result and clean up output
65
  if isinstance(result, list) and len(result) > 0:
66
  generated_text = result[0].get("generated_text", "No response from model")
67
 
 
 
 
68
  # Format the output to show only the query and the answer
69
  final_output = format_response(query, generated_text.strip())
70