# justic_leauge_lawyer_assistant / Prompts_and_Chains.py
# ******* THIS FILE CONTAINS ALL THE PROMPTS & CHAINS USED IN Functions.py ***********
from Templates import *
from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from dotenv import load_dotenv
import os
import streamlit as st
class PromptTemplates:
    """Prompt templates used by the chains below (template strings come from Templates.py)."""

    def __init__(self):
        self.legal_adviser_bot_prompt = PromptTemplate(
            input_variables=["chat_history", "input"], template=legal_adviser_template
        )
        self.case_summary_prompt = PromptTemplate(
            input_variables=["case_name", "case_info"], template=case_summary_template
        )
        self.legal_case_bot_prompt = PromptTemplate(
            input_variables=["case_summary", "context", "input"], template=legal_case_bot_template
        )
        self.lawyer_recommendations_prompt = PromptTemplate(
            input_variables=["user_inputs", "matching_lawyers", "additional_info"], template=lawyer_recommendation_template
        )
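# --- Illustrative example (not part of the original module) -------------------
# A minimal sketch of how one of the templates above can be rendered on its own,
# kept as a comment so nothing runs at import time. The chat_history/input
# values are hypothetical placeholders.
#
# prompts = PromptTemplates()
# rendered = prompts.legal_adviser_bot_prompt.format(
#     chat_history="",
#     input="What are my options after receiving an eviction notice?",
# )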
class LLMChains:
    """LLM chains built from the prompt templates above; consumed by Functions.py."""

    def __init__(self):
        load_dotenv()
        openai_api_key = os.getenv("OPENAI_API_KEY")
        obj = PromptTemplates()
        model_name = st.session_state["selected_model"]
        # legal adviser conversation chain
        self.legal_adviser_bot_chain = LLMChain(
            llm=OpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.7, openai_api_key=openai_api_key),
            prompt=obj.legal_adviser_bot_prompt,
            verbose=True,
        )
        # case summary chain
        self.case_summary_chain = LLMChain(
            llm=OpenAI(model_name=model_name, temperature=0.7, openai_api_key=openai_api_key),
            prompt=obj.case_summary_prompt,
            verbose=True,
        )
        # legal case conversation chain
        self.legal_case_bot_chain = LLMChain(
            llm=OpenAI(model_name=model_name, temperature=0.7, openai_api_key=openai_api_key),
            prompt=obj.legal_case_bot_prompt,
            verbose=True,
        )
        # lawyer recommendation chain
        self.lawyer_recommendations_chain = LLMChain(
            llm=OpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.7, openai_api_key=openai_api_key),
            prompt=obj.lawyer_recommendations_prompt,
            verbose=True,
        )
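# --- Illustrative usage (not part of the original module) ---------------------
# A minimal sketch of how Functions.py might invoke these chains, assuming a
# Streamlit session where st.session_state["selected_model"] is already set and
# OPENAI_API_KEY is defined in the .env file. Kept as a comment so it does not
# run at import time; the case details are hypothetical placeholders.
#
# chains = LLMChains()
# summary = chains.case_summary_chain.run(
#     case_name="Example v. Example",
#     case_info="Key facts, parties, and filings extracted from the uploaded case documents.",
# )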