Ferdi committed on
Commit
8ea257e
1 Parent(s): c550f3d

change langchain imports

Browse files
Files changed (4) hide show
  1. instructions.txt +18 -0
  2. src/app.py +6 -1
  3. src/conversation.py +5 -4
  4. src/vector_index.py +2 -3
instructions.txt ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Using the detailed context provided and the specific input regarding the client's case, follow these instructions to assist in drafting a petition related to French employment law.
2
+
3
+ 1. **Client Case Input**: {input}
4
+
5
+ 2. **Document Context Input**: {context}
6
+
7
+ Given the context and input, perform the following steps:
8
+
9
+ - Understand the specifics of the client's case and the context provided by the retrieved documents.
10
+ - Analyze the legal framework and precedents to identify applicable legal principles and arguments that can support the client's position.
11
+ - Generate a draft petition that includes:
12
+ - A tailored statement of facts based on the client's input.
13
+ - A presentation of legal issues informed by the document context.
14
+ - A compelling argumentation section that leverages insights from the document context.
15
+ - A conclusion outlining the desired remedies or relief, in accordance with French employment law.
16
+
17
+ Ensure the draft is coherent, precise, and aligned with current legal standards and practices, maintaining confidentiality and adapting the content to the unique aspects of the client's case.
18
+ Please note that the entire draft, including legal arguments and factual descriptions, should be prepared in French to comply with the requirements of the French legal system.
src/app.py CHANGED
@@ -1,6 +1,11 @@
1
  import gradio as gr
2
  from utils import *
3
 
 
 
 
 
 
4
  with gr.Blocks(gr.themes.Soft(primary_hue=gr.themes.colors.slate, secondary_hue=gr.themes.colors.purple)) as demo:
5
  with gr.Row():
6
 
@@ -11,7 +16,7 @@ with gr.Blocks(gr.themes.Soft(primary_hue=gr.themes.colors.slate, secondary_hue=
11
  vector_index_btn = gr.Button('Create vector store', variant='primary',scale=1)
12
  vector_index_msg_out = gr.Textbox(show_label=False, lines=1,scale=1, placeholder="Creating vectore store ...")
13
 
14
- instruction = gr.Textbox(label="System instruction", lines=3, value="Use the following pieces of context to answer the question at the end by. Generate the answer based on the given context only.If you do not find any information related to the question in the given context, just say that you don't know, don't try to make up an answer. Keep your answer expressive.")
15
 
16
  with gr.Accordion(label="Text generation tuning parameters"):
17
  temperature = gr.Slider(label="temperature", minimum=0.1, maximum=1, value=0.1, step=0.05)
 
1
  import gradio as gr
2
  from utils import *
3
 
4
+ file_path = 'instructions.txt'
5
+
6
+ with open(file_path, 'r', encoding='utf-8') as file:
7
+ instruction_text = file.read()
8
+
9
  with gr.Blocks(gr.themes.Soft(primary_hue=gr.themes.colors.slate, secondary_hue=gr.themes.colors.purple)) as demo:
10
  with gr.Row():
11
 
 
16
  vector_index_btn = gr.Button('Create vector store', variant='primary',scale=1)
17
  vector_index_msg_out = gr.Textbox(show_label=False, lines=1,scale=1, placeholder="Creating vectore store ...")
18
 
19
+ instruction = gr.Textbox(label="System instruction", lines=3, value=instruction_text)
20
 
21
  with gr.Accordion(label="Text generation tuning parameters"):
22
  temperature = gr.Slider(label="temperature", minimum=0.1, maximum=1, value=0.1, step=0.05)
src/conversation.py CHANGED
@@ -1,9 +1,10 @@
1
  from langchain.chains import ConversationalRetrievalChain
2
- from langchain.chat_models import ChatOpenAI
3
  from langchain.prompts import PromptTemplate
4
  import pinecone
5
  from langchain_community.vectorstores import Pinecone
6
- from langchain.embeddings.huggingface import HuggingFaceEmbeddings
 
7
  import os
8
 
9
  openai_api_key = os.environ.get("OPENAI_API_KEY")
@@ -34,9 +35,9 @@ class Conversation_RAG:
34
  def create_conversation(self, model, vectordb, k_context=5, instruction="Use the following pieces of context to answer the question at the end by. Generate the answer based on the given context only. If you do not find any information related to the question in the given context, just say that you don't know, don't try to make up an answer. Keep your answer expressive."):
35
 
36
  template = instruction + """
37
- context:\n
38
  {context}\n
39
- data: {question}\n
40
  """
41
 
42
  QCA_PROMPT = PromptTemplate(input_variables=["instruction", "context", "question"], template=template)
 
1
  from langchain.chains import ConversationalRetrievalChain
2
+ from langchain_openai import ChatOpenAI
3
  from langchain.prompts import PromptTemplate
4
  import pinecone
5
  from langchain_community.vectorstores import Pinecone
6
+ from langchain_community.embeddings import HuggingFaceEmbeddings
7
+
8
  import os
9
 
10
  openai_api_key = os.environ.get("OPENAI_API_KEY")
 
35
  def create_conversation(self, model, vectordb, k_context=5, instruction="Use the following pieces of context to answer the question at the end by. Generate the answer based on the given context only. If you do not find any information related to the question in the given context, just say that you don't know, don't try to make up an answer. Keep your answer expressive."):
36
 
37
  template = instruction + """
38
+ **Document Context Input**:\n
39
  {context}\n
40
+ **Client Case Input**: {question}\n
41
  """
42
 
43
  QCA_PROMPT = PromptTemplate(input_variables=["instruction", "context", "question"], template=template)
src/vector_index.py CHANGED
@@ -1,8 +1,7 @@
1
  from pinecone import Pinecone
2
- from langchain_community.document_loaders import Docx2txtLoader
3
- from langchain.document_loaders import PyPDFLoader
4
  from langchain.text_splitter import RecursiveCharacterTextSplitter
5
- from langchain.embeddings.huggingface import HuggingFaceEmbeddings
6
  import os, uuid
7
 
8
  def create_vector_store_index(file_path):
 
1
  from pinecone import Pinecone
2
+ from langchain_community.document_loaders import Docx2txtLoader, PyPDFLoader
 
3
  from langchain.text_splitter import RecursiveCharacterTextSplitter
4
+ from langchain_community.embeddings import HuggingFaceEmbeddings
5
  import os, uuid
6
 
7
  def create_vector_store_index(file_path):