leothesouthafrican committed
Commit e54b26e · 1 Parent(s): 11286a0

hf fix new branch

Dockerfile CHANGED
@@ -1,59 +1,38 @@
 #------------------------
-#------------------------
 # STAGE 1: Backend build
 #------------------------
-#------------------------
 FROM python:3.9-slim-buster as backend
 WORKDIR /app/backend
 
+# Copy necessary files
 COPY requirements.txt .
 COPY build_scripts/build.sh .
 
+# Set environment variable, make the script executable and run it
 ENV STAGE=backend
-RUN chmod +x ./build.sh && ./build.sh
-COPY requirements.txt .
-COPY build_scripts/build.sh .
-
-ENV STAGE=backend
-RUN chmod +x ./build.sh && ./build.sh
-
-# Make the script executable
-RUN chmod +x ./build.sh
-# Make the script executable
-RUN chmod +x ./build.sh
+RUN chmod +x ./build.sh && \
+    ./build.sh backend && \
+    rm -rf /var/lib/apt/lists/*
 
-# Execute the script with backend as argument
-RUN ./build.sh backend
-# Execute the script with backend as argument
-RUN ./build.sh backend
+# Copy the rest of the backend app
 COPY web_app/backend/ .
 
 CMD ["python", "run.py"]
-#-------------------------
+
 #-------------------------
 # STAGE 2: Frontend build
 #-------------------------
-#-------------------------
 FROM node:14-alpine as frontend
 WORKDIR /app/frontend
 
-# Grouped copy for package-related files
-COPY web_app/frontend/package*.json .
-# Grouped copy for package-related files
+# Grouped copy for package-related files and the frontend app
 COPY web_app/frontend/package*.json .
 COPY web_app/frontend/ .
 COPY build_scripts/build.sh .
-COPY build_scripts/build.sh .
 
+# Set environment variable, make the script executable and run it
 ENV STAGE=frontend
-RUN chmod +x ./build.sh && ./build.sh
-ENV STAGE=frontend
-RUN chmod +x ./build.sh && ./build.sh
+RUN chmod +x ./build.sh && ./build.sh frontend
 
-# Execute the script with frontend as argument
-RUN ./build.sh frontend
-# Execute the script with frontend as argument
-RUN ./build.sh frontend
 EXPOSE 3000
-
 CMD ["serve", "-s", "dist", "-l", "3000"]
requirements.txt CHANGED
@@ -121,7 +121,7 @@ prometheus-client==0.17.1
 prompt-toolkit==3.0.39
 protobuf==4.23.4
 psutil==5.9.5
-psycopg2-binary==2.9.1
+#psycopg2-binary==2.9.1
 ptyprocess==0.7.0
 pulsar-client==3.2.0
 pure-eval==0.2.2
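Note on the requirements.txt change: with psycopg2-binary commented out, any import psycopg2 will now raise ImportError in the built image. If Postgres access is only needed in some environments, a guarded import keeps the rest of the app usable; a minimal sketch (the fallback behaviour here is an assumption, not something this repo does):

    try:
        import psycopg2  # provided by psycopg2-binary when it is installed
    except ImportError:
        # Assumed fallback: continue without the Postgres-backed features
        psycopg2 = None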
web_app/backend/app/answers_model.py CHANGED
@@ -1,5 +1,5 @@
 import pinecone
-from dotenv import load_dotenv
+from dotenv import load_dotenv, find_dotenv
 from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.memory import ConversationBufferMemory
 from langchain.vectorstores import Pinecone
@@ -8,7 +8,7 @@ from langchain.chains import RetrievalQA
 from langchain import PromptTemplate
 
 # Load environment variables
-load_dotenv()
+load_dotenv(find_dotenv())
 
 # Setting up embeddings
 embedder = OpenAIEmbeddings()
@@ -62,3 +62,14 @@ qa = RetrievalQA.from_chain_type(
 def generate_question_response(query: str) -> str:
     return qa.run(query)
 
+if __name__ == "__main__":
+    # Sample query
+    test_query = "What is the capital of France?"
+
+    # Generate a response using the model
+    response = generate_question_response(test_query)
+
+    # Print the query and its response
+    print(f"Query: {test_query}")
+    print(f"Response: {response}")
+
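Note on the dotenv change: find_dotenv() searches the calling module's directory and then each parent directory for a .env file, so the lookup no longer depends on the process's working directory, which is the usual reason a bare load_dotenv() silently loads nothing inside a container. A minimal standalone sketch of the pattern this commit adopts (OPENAI_API_KEY is just an example variable):

    from dotenv import load_dotenv, find_dotenv
    import os

    # find_dotenv() returns the path of the first .env found while walking up parents
    load_dotenv(find_dotenv())
    print("key loaded:", os.environ.get("OPENAI_API_KEY") is not None)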
web_app/backend/app/chatbot_routes.py CHANGED
@@ -1,16 +1,22 @@
 from app import app
 from flask import send_from_directory, jsonify, request
 import os
-from dotenv import load_dotenv
+from dotenv import load_dotenv, find_dotenv
 from app.question_model import generate_response
 from app.answers_model import generate_question_response
 from fpdf import FPDF
 import yagmail
 
-print(os.getcwd())
-
 # Load the environment variables
-load_dotenv()
+load_dotenv(find_dotenv())
+
+print(f"Loading .env from: {find_dotenv()}")
+
+# Add a handler for internal server errors
+@app.errorhandler(500)
+def internal_error(error):
+    app.logger.error("Server Error: %s", error)
+    return jsonify(message="Internal Server Error"), 500
 
 # Route to handle user prompt questions
 @app.route('/sendText', methods=['POST'])
@@ -25,8 +31,8 @@ def handle_text():
 
         return jsonify(message='Received!', output=output), 200
     except Exception as e:
-        print(f'Error occurred: {e}')
-        return jsonify(message='Error occurred', error=str(e), type=str(type(e))), 500
+        app.logger.error(f"Error occurred during sendText: {e}")
+        return jsonify(message='Error occurred during sendText', error=str(e), type=str(type(e))), 500
 
 # Route to handle suggested questions
 @app.route('/sendQuestions', methods=['POST'])
@@ -50,8 +56,8 @@ def handle_questions():
 
         return jsonify(message='Received!', output=output), 200
     except Exception as e:
-        print(f'Error occurred: {e}')
-        return jsonify(message='Error occurred', error=str(e), type=str(type(e))), 500
+        app.logger.error(f"Error occurred during sendQuestions: {e}")
+        return jsonify(message='Error occurred during sendQuestions', error=str(e), type=str(type(e))), 500
 
 @app.route('/exportChat', methods=['POST'])
 def export_chat():
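Note on the error handler: app.errorhandler(500) returns a JSON body instead of Flask's default HTML error page whenever an exception escapes a route's own try/except. A minimal sketch of exercising it with Flask's test client (the /boom route is hypothetical, added only to force a failure):

    # Assumes the Flask `app` from this repo, with debug mode off
    @app.route('/boom')
    def boom():
        raise RuntimeError("forced failure")

    with app.test_client() as client:
        resp = client.get('/boom')
        print(resp.status_code, resp.get_json())  # 500 {'message': 'Internal Server Error'}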
web_app/backend/app/question_model.py CHANGED
@@ -4,10 +4,12 @@ from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.prompts import PromptTemplate
 from langchain.chat_models import ChatOpenAI
 from langchain.chains import LLMChain
-from dotenv import load_dotenv
+from dotenv import load_dotenv, find_dotenv
 import os
 
-load_dotenv()
+# Load the environment variables
+load_dotenv(find_dotenv())
+
 
 # Load the data
 # Get the directory of the current file
web_app/backend/run.py CHANGED
@@ -1,4 +1,4 @@
 from app import app
 
 if __name__ == "__main__":
-    app.run(host="0.0.0.0", port=5000)
+    app.run(host="0.0.0.0", port=5000, debug=True)
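Note on debug=True: it enables the auto-reloader and the Werkzeug interactive debugger, which is convenient during development but should not ship to production. In debug mode unhandled exceptions surface in the interactive debugger rather than in the errorhandler(500) JSON response above, and the debugger console can execute arbitrary code on the server.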
web_app/frontend/src/hooks/useChat.js CHANGED
@@ -24,8 +24,8 @@ export default function useChatHandler() {
     setLoading(true);
     let success = false;
     try {
-      const sendTextResponse = await axios.post('http://127.0.0.1:5000/sendText', { text });
-      const sendQuestionsResponse = await axios.post('http://127.0.0.1:5000/sendQuestions', { text });
+      const sendTextResponse = await axios.post('http://localhost:5000/sendText', { text });
+      const sendQuestionsResponse = await axios.post('http://localhost:5000/sendQuestions', { text });
 
       const newChatItem = {
         question: text,
@@ -60,7 +60,7 @@ export default function useChatHandler() {
     setChatHistory([...chatHistory]);
 
     try {
-      const response = await axios.post('http://127.0.0.1:5000/sendText', {
+      const response = await axios.post('http://localhost:5000/sendText', {
         text: chatHistory[chatItemIndex].followUps[followUpIndex].text
       });
 
@@ -69,7 +69,7 @@ export default function useChatHandler() {
       chatHistory[chatItemIndex].followUps[followUpIndex].loading = false;
 
       // Get new follow-up questions based on the follow-up response.
-      const sendQuestionsResponse = await axios.post('http://127.0.0.1:5000/sendQuestions', { text: response.data.output });
+      const sendQuestionsResponse = await axios.post('http://localhost:5000/sendQuestions', { text: response.data.output });
 
       // Add to FollowUp array
       const newFollowUps = sendQuestionsResponse.data.output.followUps.map(followUp => ({
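Note on the URL change: 127.0.0.1 and localhost are normally interchangeable for a port published on the developer's machine, though localhost can resolve to the IPv6 address ::1 first on some systems. Neither reaches the backend once the frontend and backend run in separate containers, so a configurable base URL (for example, one injected into the frontend build through an environment variable) would presumably be the longer-term fix for the Space deployment.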