Update app.py
Browse files
app.py
CHANGED
@@ -6,10 +6,10 @@ from langchain.embeddings import HuggingFaceEmbeddings
|
|
6 |
from langchain.vectorstores import FAISS
|
7 |
from langchain.llms import CTransformers
|
8 |
from langchain.chains import ConversationalRetrievalChain
|
9 |
-
from dl_hf_model import dl_hf_model
|
10 |
from ctransformers import AutoModelForCausalLM
|
11 |
from langchain_g4f import G4FLLM
|
12 |
from g4f import Provider, models
|
|
|
13 |
import requests
|
14 |
# Define the path for generated embeddings
|
15 |
DB_FAISS_PATH = 'vectorstore/db_faiss'
|
@@ -83,11 +83,26 @@ llm = load_llm()
|
|
83 |
|
84 |
# Create a conversational chain
|
85 |
chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=db.as_retriever())
|
|
|
|
|
86 |
|
87 |
# Function for conversational chat
|
88 |
def conversational_chat(query):
|
89 |
-
result =
|
90 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
91 |
return result["answer"]
|
92 |
|
93 |
# Initialize chat history
|
|
|
6 |
from langchain.vectorstores import FAISS
|
7 |
from langchain.llms import CTransformers
|
8 |
from langchain.chains import ConversationalRetrievalChain
|
|
|
9 |
from ctransformers import AutoModelForCausalLM
|
10 |
from langchain_g4f import G4FLLM
|
11 |
from g4f import Provider, models
|
12 |
+
import spacy
|
13 |
import requests
|
14 |
# Define the path for generated embeddings
|
15 |
DB_FAISS_PATH = 'vectorstore/db_faiss'
|
|
|
83 |
|
84 |
# spaCy pipeline for Japanese — used later to score answer/query similarity.
nlp = spacy.load("ja_core_news_sm")

# Conversational chain: the LLM answers questions over the FAISS retriever.
chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=db.as_retriever())
|
88 |
|
89 |
# Function for conversational chat
def conversational_chat(query, max_retries=3):
    """Run the retrieval chain for *query*, retrying until the answer is
    sufficiently similar to the query or the retry budget is exhausted.

    Parameters
    ----------
    query : str
        The user's question (Japanese text, judging by the spaCy model
        loaded at module level).
    max_retries : int, optional
        Upper bound on chain invocations. Replaces the previous unbounded
        ``while similarity_score <= threshold`` loop, which could spin
        forever when the similarity never crossed the threshold.

    Returns
    -------
    str
        The ``"answer"`` field of the last chain result.
    """
    # Threshold for accepting an answer (tune as needed).
    similarity_threshold = 0.8
    result = None
    # NOTE(review): ja_core_news_sm ships without static word vectors, so
    # Doc.similarity falls back to context tensors and emits a warning;
    # ja_core_news_md/lg would give meaningful scores — confirm.
    for _ in range(max_retries):
        result = chain({"question": query, "chat_history": st.session_state['history']})
        doc = nlp(result["answer"])
        # Compare the generated answer against the query itself.
        similarity_score = doc.similarity(nlp(query))
        if similarity_score > similarity_threshold:
            break
    # Record the exchange exactly once (previously this append appeared to
    # sit inside the retry loop, duplicating history entries — confirm
    # against the original indentation).
    st.session_state['history'].append((query, result["answer"]))
    return result["answer"]
|
107 |
|
108 |
# Initialize chat history
|