"""Generate JSONPath expressions from a JSON document via an OpenAI LLM.

Builds a chat-style prompt (system + human templates loaded from disk) and
runs it through a LangChain ``LLMChain`` backed by an OpenAI completion model.
"""

from langchain.prompts import (
    PromptTemplate,
    SystemMessagePromptTemplate,
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
from langchain.cache import InMemoryCache
import langchain

# Cache identical LLM calls in memory so repeated prompts don't re-bill.
langchain.llm_cache = InMemoryCache()

import pandas as pd
import os
from langchain.chains import LLMChain
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field


class JSONPath_Generator:
    """Ask an OpenAI model for a JSONPath that locates a target value.

    The prompt is assembled from two template files on disk; an optional
    condition clause is appended to the human message when supplied.
    """

    def __init__(self, json_input, target_value, json_condition, openai_key):
        """Store the query inputs and build the LLM client.

        Args:
            json_input: The JSON document (as text) to search within.
            target_value: The value whose JSONPath should be produced.
            json_condition: Optional extra constraint; '' means "none".
            openai_key: OpenAI API key used to authenticate the model.
        """
        self.json_input = json_input
        self.target_value = target_value
        self.json_condition = json_condition
        self.openai_api_key = openai_key
        # temperature=0 for deterministic, reproducible JSONPath output.
        self.model = OpenAI(
            temperature=0,
            openai_api_key=self.openai_api_key,
            model_name="gpt-3.5-turbo-instruct",
        )

    def create_chat_prompt(
        self,
        system_template_path="system_template.txt",
        human_template_path="human_template.txt",
    ):
        """Load the prompt templates from disk and build ``self.chat_prompt``.

        Args:
            system_template_path: File holding the system-message template
                (defaults preserve the original hard-coded path).
            human_template_path: File holding the human-message template.

        Raises:
            FileNotFoundError: If either template file is missing.
        """
        # System template
        with open(system_template_path, "r", encoding="utf-8") as sys_temp:
            system_template = sys_temp.read().strip()
        system_prompt = SystemMessagePromptTemplate.from_template(system_template)

        # Human template
        with open(human_template_path, "r", encoding="utf-8") as hum_temp:
            human_template = hum_temp.read().strip()
        # Only mention the condition when the caller actually supplied one,
        # otherwise the {json_condition} placeholder would render as empty.
        if self.json_condition != '':
            human_template += " provided the {json_condition}"
        human_prompt = HumanMessagePromptTemplate.from_template(human_template)

        # Chat prompt combining both messages, in system-then-human order.
        self.chat_prompt = ChatPromptTemplate.from_messages(
            [system_prompt, human_prompt]
        )

    def create_llm_chain(self):
        """Build the LLMChain, run it, and return the model's response.

        Returns:
            The raw text response from the LLM (also stored on
            ``self.response``).
        """
        self.create_chat_prompt()
        chain = LLMChain(llm=self.model, prompt=self.chat_prompt)
        self.response = chain.run(
            Target_value=self.target_value,
            json_condition=self.json_condition,
            JSON_Input=self.json_input,
        )
        return self.response