from langchain.prompts import PromptTemplate, SystemMessagePromptTemplate, ChatPromptTemplate, \
    HumanMessagePromptTemplate
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
from langchain.cache import InMemoryCache
import langchain

# Cache LLM responses in memory so identical prompts are not re-sent to the API
langchain.llm_cache = InMemoryCache()

import pandas as pd
import os
from langchain.chains import LLMChain
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
os.environ["OPENAI_API_KEY"] = "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" | |
class JSONPath_Generator:
    """Generates a JSONPath expression that locates a target value in a
    JSON input, optionally constrained by a condition, using an LLM."""

    def __init__(self, json_input, target_value, json_condition):
        self.json_input = json_input
        self.target_value = target_value
        self.json_condition = json_condition

        # Alternative back-ends, kept for reference:
        # os.environ["OPENAI_API_KEY"] = "<YOUR_AZURE_OPENAI_KEY>"
        # self.model = ChatOpenAI()
        #
        # os.environ["OPENAI_API_TYPE"] = "azure"
        # os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
        # self.model = AzureChatOpenAI(
        #     deployment_name="gpt-4",
        #     model_name="gpt-4",
        # )

        # temperature=0 keeps completions deterministic, which suits
        # structured output such as JSONPath expressions
        self.model = OpenAI(
            temperature=0,
            model_name="gpt-3.5-turbo-instruct",
        )
    def create_chat_prompt(self):
        # System template
        with open("system_template.txt", "r") as sys_temp:
            system_template = sys_temp.read().strip()
        system_prompt = SystemMessagePromptTemplate.from_template(system_template)

        # Human template; append the condition clause only when a condition is given
        with open("human_template.txt", "r") as hum_temp:
            human_template = hum_temp.read().strip()
        if self.json_condition != '':
            human_template += " provided the {json_condition}"
        human_prompt = HumanMessagePromptTemplate.from_template(human_template)

        # Combine both messages into a single chat prompt
        self.chat_prompt = ChatPromptTemplate.from_messages([system_prompt, human_prompt])
    def create_llm_chain(self):
        self.create_chat_prompt()
        chain = LLMChain(llm=self.model, prompt=self.chat_prompt)
        # Template variables come from the template files read above;
        # {json_condition} is only present when a condition was supplied
        self.response = chain.run(
            Target_value=self.target_value,
            json_condition=self.json_condition,
            JSON_Input=self.json_input,
        )
        return self.response
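
# Minimal usage sketch (illustrative): assumes system_template.txt and
# human_template.txt sit next to this file and use the {Target_value},
# {JSON_Input}, and (optionally) {json_condition} placeholders. The sample
# JSON and the expected output shown below are hypothetical.
if __name__ == "__main__":
    sample_json = '{"store": {"book": [{"title": "Moby Dick", "price": 8.99}]}}'
    generator = JSONPath_Generator(
        json_input=sample_json,
        target_value="8.99",
        json_condition="title is 'Moby Dick'",
    )
    print(generator.create_llm_chain())
    # e.g. $.store.book[?(@.title == 'Moby Dick')].price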