sachin19566 committed on
Commit 4790e0d
1 Parent(s): f3bca1d

Upload 6 files

Files changed (6)
  1. PizzaGPT.py +39 -0
  2. Readme.md +16 -0
  3. app.py +18 -0
  4. custom_instruction.txt +1 -0
  5. langchain_helper.py +32 -0
  6. requirements.txt +6 -0
PizzaGPT.py ADDED
@@ -0,0 +1,39 @@
+ from openai import OpenAI
+ import os
+ from dotenv import load_dotenv
+
+ # Load environment variables from the .env file
+ load_dotenv()
+
+ # Access API keys
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+
+
+ MODEL_NAME = "gpt-3.5-turbo"
+ TEMPERATURE = 0
+ MAX_OUTPUT_TOKENS = 100
+
+
+ class PizzaGPT:
+
+     def __init__(self):
+         # Create the OpenAI client with the key loaded from the environment
+         self.__client = OpenAI(api_key=OPENAI_API_KEY)
+         with open("custom_instruction.txt") as file_handle:
+             self.__custom_instructions = file_handle.read()
+
+         # System messages that prime the model before any user input
+         self.__messages = [
+             {"role": "system", "content": "Act as an information extractor"},
+             {"role": "system", "content": self.__custom_instructions},
+         ]
+
+     def chat(self, messages):
+         # Prepend the system messages to the caller-supplied conversation
+         message_context = []
+         message_context.extend(self.__messages)
+         message_context.extend(messages)
+
+         completion = self.__client.chat.completions.create(
+             model=MODEL_NAME,
+             messages=message_context,
+             temperature=TEMPERATURE,
+             max_tokens=MAX_OUTPUT_TOKENS)
+
+         return completion.choices[0].message.content
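For reference, a minimal, hypothetical usage sketch of the `PizzaGPT` class (not part of this commit; it assumes a valid `OPENAI_API_KEY` in `.env`, `custom_instruction.txt` in the working directory, and a made-up user message):

```
from PizzaGPT import PizzaGPT

bot = PizzaGPT()

# PizzaGPT.chat prepends its system messages to the messages passed in
reply = bot.chat([
    {"role": "user", "content": "My name is Mr x, my email id is x@example.com."}
])
print(reply)  # expected to be a JSON-formatted string of extracted fields
```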
Readme.md ADDED
@@ -0,0 +1,16 @@
+ ## To run the app locally
+ 1. Clone the repo and switch to the `dev` branch.
+ 2. Create a conda environment using the following command:
+ ```
+ conda create -p env python==3.10 -y
+ ```
+ 3. Activate the environment:
+ `conda activate /Users/sachinmishra/Desktop/Pizza-gpt/env`
+
+ 4. Install all the required packages using the following command:
+ `pip install -r requirements.txt`
+
+ 5. Rename the `.env.dist` file to `.env` and set `OPENAI_API_KEY` in it (a sample is sketched just after this file).
+
+ 6. Run the app: `streamlit run app.py`
+ 7. To test only the LLM part, run the `langchain_helper.py` file using `python langchain_helper.py`.
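As referenced in step 5, the `.env` file is assumed to look roughly like this (the key value is a placeholder, not a real credential):

```
OPENAI_API_KEY=sk-your-key-here
```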
app.py ADDED
@@ -0,0 +1,18 @@
+ import streamlit as st
+ from langchain_helper import generate_json
+
+
+ st.title("Agent Information Generator")
+
+ # Text input box for the user to enter information
+ text_input = st.text_area("Enter text:", height=200)
+
+ if st.button("Generate JSON"):
+     if text_input:
+         # Generate JSON using the OpenAI chat model
+         generated_json = generate_json(text_input)
+
+         # Display the generated JSON
+         st.json(generated_json)
+     else:
+         st.warning("Please enter text before generating JSON.")
custom_instruction.txt ADDED
@@ -0,0 +1 @@
+ Act as an information extractor for the given sentences. You should extract information such as Agent Type, Agent Profile, PAN details, Email ID, Contact Number, First Name, Last Name, Gender, Date of Birth, Address, Pincode, City, State, and Occupation from the sentences and return all the fields with their corresponding values in JSON format. If any field is missing in the sentence, ask the user to provide that information and add it to the JSON schema.
langchain_helper.py ADDED
@@ -0,0 +1,32 @@
+ import os
+ from dotenv import load_dotenv
+ from langchain.llms import OpenAI
+ from langchain.prompts import PromptTemplate
+
+
+ # Set your OpenAI API key (loaded from the .env file)
+ load_dotenv()
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+
+
+ llm = OpenAI(temperature=0)
+
+ def generate_json(sentence):
+     # Build the extraction prompt around the provided sentence
+     prompt_template_name = PromptTemplate(
+         input_variables=['sentence'],
+         template='''Act as an information extractor for the given sentences. You should extract information such as Agent Type, Agent Profile, PAN details, Email ID, Contact Number, First Name, Last Name, Gender, Date of Birth, Address, Pincode, City, State, and Occupation from the sentences and return all the fields with their corresponding values in JSON format.
+ If any field is missing in the sentence, ask the user to provide that information and add it to the JSON schema. Do not invent values for fields that are not provided in the sentence.
+ The provided sentence is: {sentence}'''
+     )
+
+     p = prompt_template_name.format(sentence=sentence)
+     print(p)
+
+     response = llm.predict(p)
+
+     return response
+
+
+ if __name__ == "__main__":
+     print(generate_json("My name is Mr x and my address is H24 house, my pan details is XYz."))
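Side note: `langchain==0.1.4` still supports `llm.predict`, but newer LangChain releases steer toward the runnable `invoke` API; a sketch of the equivalent call (same `llm` and prompt string `p` as above, nothing else assumed) would be:

```
# Hypothetical alternative to llm.predict(p) on langchain 0.1.x
response = llm.invoke(p)  # returns the completion text for a plain LLM
```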
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ openai==1.10.0
+ redis==5.0.1
+ Flask==3.0.1
+ python-dotenv==1.0.1
+ streamlit==1.30.0
+ langchain==0.1.4