Merge branch 'main' of https://huggingface.co/spaces/kk53/rag_lithuania
app.py CHANGED
@@ -44,6 +44,26 @@ def load_model():
         n_threads=2  # Verbose is required to pass to the callback manager
     )
     st.success("loaded the second NLP model from Hugging Face!")
+
+    model_2_name = "TheBloke/zephyr-7B-beta-GGUF"
+    model_2base_name = "zephyr-7b-beta.Q4_K_M.gguf"
+    model_path_model = hf_hub_download(
+        repo_id=model_2_name,
+        filename=model_2base_name,
+        cache_dir='/content/models'  # directory for the model
+    )
+    callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
+    llm = LlamaCpp(
+        model_path=model_path_model,
+        temperature=0.75,
+        max_tokens=2500,
+        top_p=1,
+        callback_manager=callback_manager,
+        verbose=True,
+        n_ctx=2048,
+        n_threads=2  # Verbose is required to pass to the callback manager
+    )
+    st.success("loaded the second NLP model from Hugging Face!")
     # prompt_template = "<|system|>\
     # </s>\
     # <|user|>\
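Standalone, the loading path this hunk introduces looks roughly like the sketch below. This is not the committed code verbatim; the imports are inferred from the calls visible in the diff (huggingface_hub plus the langchain CallbackManager/LlamaCpp stack), so treat them as assumptions.

# Sketch of the new loading path, under the assumptions stated above.
from huggingface_hub import hf_hub_download
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import LlamaCpp

model_path = hf_hub_download(
    repo_id="TheBloke/zephyr-7B-beta-GGUF",   # quantized Zephyr 7B repo
    filename="zephyr-7b-beta.Q4_K_M.gguf",    # 4-bit GGUF weights
    cache_dir="/content/models",              # '/content' suggests a Colab-style path
)
llm = LlamaCpp(
    model_path=model_path,
    temperature=0.75,
    max_tokens=2500,
    top_p=1,
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
    verbose=True,  # verbose must be set when a callback manager is passed
    n_ctx=2048,    # context window for the llama.cpp session
    n_threads=2,   # small thread count to fit CPU Space hardware
)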
@@ -51,6 +71,8 @@ def load_model():
     # <|assistant|>"
     # template = prompt_template
     # prompt = PromptTemplate.from_template(template)
+
+    return model, llm
     # callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
     # llm = LlamaCpp(
     #     model_path=model_path_model,
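The substantive change in this hunk is the new return model, llm, which is what the page-level model, llm = load_model() call below expects. A minimal sketch of the loader's intended shape, assuming a st.cache_resource decorator (not visible in this diff) and two hypothetical helper names:

import streamlit as st

@st.cache_resource  # assumption: cache so Streamlit reruns don't rebuild both models
def load_model():
    model = build_embedding_model()  # hypothetical helper: the embedding model
    llm = build_zephyr_llm()         # hypothetical helper: the LlamaCpp block above
    return model, llm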
@@ -66,6 +88,9 @@ def load_model():
 
 st.title("Please ask your question on Lithuanian rules for foreigners.")
 model, llm = load_model()
+pc = Pinecone(api_key=apikeys)
+index = pc.Index("law")
+model, llm = load_model()
 pc = Pinecone(api_key="003117b0-6caf-4de4-adf9-cc49da6587e6")
 index = pc.Index("law")
 question = st.text_input("Enter your question:")
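Note that this hunk inserts pc = Pinecone(api_key=apikeys) while keeping the older lines, so the client, the index, and load_model() all run twice; the surviving second construction also hardcodes an API key in the repository. A sketch of the deduplicated setup with the key read from the environment (PINECONE_API_KEY is an assumed secret name, not one the diff defines):

import os
from pinecone import Pinecone

pc = Pinecone(api_key=os.environ["PINECONE_API_KEY"])  # set as a Space secret
index = pc.Index("law")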
@@ -81,10 +106,14 @@ response_t = response['matches'][0]['metadata']['text']
 st.header("Answer:")
 st.write(response_t)
 
-
-
-
-
-
-
-
+query = model.create_embedding(question)
+q = query['data'][0]['embedding']
+response = index.query(
+    vector=q,
+    top_k=1,
+    include_metadata=True,
+    namespace="ns1"
+)
+response_t = response['matches'][0]['metadata']['text']
+st.header("Answer:")
+st.write(response_t)
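This final hunk wires retrieval to the page: the question is embedded, Pinecone is queried for the single nearest chunk, and that chunk's stored text is shown directly as the answer (the Zephyr llm loaded above is not consulted here). As a self-contained sketch, assuming model is a llama-cpp-python embedding model whose create_embedding returns an OpenAI-style dict, and index is the Pinecone index from above:

def retrieve_best_match(model, index, question):
    # Embed the user question with the same model used to index the corpus.
    query = model.create_embedding(question)
    vector = query["data"][0]["embedding"]
    # Ask Pinecone for the single closest chunk in the "ns1" namespace.
    response = index.query(
        vector=vector,
        top_k=1,
        include_metadata=True,  # the chunk text lives in metadata
        namespace="ns1",
    )
    return response["matches"][0]["metadata"]["text"]

With top_k=1 the app is pure retrieval: whatever single chunk scores highest is echoed verbatim, so answer quality depends entirely on how the "law" index was chunked and embedded.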