reintegrated training
- agent/__init__.py +0 -0
- app.py +7 -1
- faq_train.py +0 -20
- train/faq.py +22 -0
agent/__init__.py
DELETED
File without changes
app.py
CHANGED
@@ -9,4 +9,10 @@ load_dotenv()
 @app.route("/", methods=['GET','POST'])
 def index():
     from agent._create import agent_executor
-    return agent_executor();
+    return agent_executor();
+
+
+@app.route("/train/faq", methods=['GET','POST'])
+def train_faq():
+    from train.faq import train
+    return train();
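For orientation, the whole of app.py after this change plausibly reads as below. Only the two route handlers are confirmed by the hunk; the Flask import, the app = Flask(__name__) line, and the load_dotenv() call (visible as hunk context) are assumptions about the unchanged top of the file.

# Hedged sketch of app.py after this commit; setup lines above the hunk are assumed.
from flask import Flask
from dotenv import load_dotenv

app = Flask(__name__)
load_dotenv()


@app.route("/", methods=['GET', 'POST'])
def index():
    # Imported lazily inside the handler, as in the diff
    from agent._create import agent_executor
    return agent_executor()


@app.route("/train/faq", methods=['GET', 'POST'])
def train_faq():
    from train.faq import train
    return train()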
faq_train.py
DELETED
@@ -1,20 +0,0 @@
-from langchain_community.document_loaders.csv_loader import CSVLoader
-from langchain.text_splitter import CharacterTextSplitter
-from langchain_openai import OpenAIEmbeddings
-from langchain_community.vectorstores.faiss import FAISS
-from dotenv import load_dotenv
-from langchain.document_loaders import WebBaseLoader
-
-load_dotenv();
-
-documents = WebBaseLoader("https://rise.mmu.ac.uk/what-is-rise/").load()
-
-# Split document in chunks
-text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=30)
-docs = text_splitter.split_documents(documents=documents)
-
-embeddings = OpenAIEmbeddings()
-# Create vectors
-vectorstore = FAISS.from_documents(docs, embeddings)
-# Persist the vectors locally on disk
-vectorstore.save_local("_rise_faq_db");
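The deleted faq_train.py built the index at module import time, so it presumably had to be run as a one-off script (python faq_train.py). After this commit the same indexing is wrapped in a train() function and triggered through the new /train/faq route, which keeps it out of the import path. A hedged invocation sketch, assuming a local Flask dev server on the default port:

# Assumed invocation of the new training endpoint; host and port are guesses
# for a local "flask run" dev server, not taken from the commit.
import requests

resp = requests.post("http://127.0.0.1:5000/train/faq")
print(resp.json())  # the route returns {"trained": "success"} on completion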
train/faq.py
ADDED
@@ -0,0 +1,22 @@
+
+def train():
+    from langchain_community.document_loaders.csv_loader import CSVLoader
+    from langchain.text_splitter import CharacterTextSplitter
+    from langchain_openai import OpenAIEmbeddings
+    from langchain_community.vectorstores.faiss import FAISS
+    from dotenv import load_dotenv
+    from langchain.document_loaders import WebBaseLoader
+
+    documents = WebBaseLoader("https://rise.mmu.ac.uk/what-is-rise/").load()
+
+    # Split document in chunks
+    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=30)
+    docs = text_splitter.split_documents(documents=documents)
+
+    embeddings = OpenAIEmbeddings()
+    # Create vectors
+    vectorstore = FAISS.from_documents(docs, embeddings)
+    # Persist the vectors locally on disk
+    vectorstore.save_local("_rise_faq_db");
+
+    return {"trained":"success"}
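The index persisted to _rise_faq_db is presumably what agent._create.agent_executor reads back when answering questions, though that module is not part of this commit. A minimal consumption sketch, assuming the same langchain_community and langchain_openai packages used above; the sample question and the allow_dangerous_deserialization flag are assumptions (recent langchain_community releases require that flag to unpickle a locally saved FAISS store):

# Hedged sketch: querying the index that train() persists. How agent._create
# actually consumes "_rise_faq_db" is not shown in this commit.
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores.faiss import FAISS
from dotenv import load_dotenv

load_dotenv()

embeddings = OpenAIEmbeddings()
vectorstore = FAISS.load_local(
    "_rise_faq_db",
    embeddings,
    allow_dangerous_deserialization=True,  # needed on newer langchain_community versions
)
# Retrieve the chunks most similar to a sample question (question text is illustrative)
for doc in vectorstore.similarity_search("What is Rise?", k=3):
    print(doc.page_content[:200])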