Delete app.py
app.py DELETED
@@ -1,164 +0,0 @@
-import gradio as gr
-from dotenv import load_dotenv
-from openai import OpenAI
-from uuid import uuid4
-load_dotenv()
-
-# logging import
-import logging
-logging.basicConfig(filename='user_interactions.log', level=logging.INFO)
-
-# langchain import
-from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder
-from langchain_openai import ChatOpenAI
-from langchain_core.output_parsers import StrOutputParser
-from langchain_core.runnables import RunnablePassthrough
-import os
-import requests
-from getpass import getpass
-
-# langfuse imports and tracing
-from langfuse import Langfuse
-from langfuse.decorators import observe
-from langfuse.openai import openai
-
-import langfuse
-from langfuse import Langfuse
-trace_id = str(uuid4())
-
-LANGFUSE_SECRET_KEY = os.environ.get('LANGFUSE_SECRET_KEY')
-LANGFUSE_PUBLIC_KEY = os.environ.get('LANGFUSE_PUBLIC_KEY')
-LANGFUSE_HOST = "https://us.cloud.langfuse.com"
-
-# OpenAI API Keys
-client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
-
-# System Message
-system_message = f'''''GOAL: You are a AI Legal Aid in which you play the role of specializing in end-of-life planning in Tennessee through a Q&A format. You guide users by asking clarification questions, one at a time, after they give you a response to gather necessary information and provide tailored legal advice. Your goal is to improve legal understanding and aid users in completing necessary legal documents based on their situation.
-
-PERSONA: In this scenario, you are an AI Legal Aid in which you play the role of specializing in end-of-life planning in Tennessee. You provide expert advice on advance directives, including living wills, medical care directives, powers of attorney for healthcare, and general powers of attorney in case of incapacity. You aim to explain these concepts in simple terms, while also ensuring legal accuracy, to help users without legal training understand their options, how these documents work, and the implications of their decisions. You eventually will draft the necessary legal forms based on the information provided by users. Responses should be friendly, professional, emotionally intelligent, and engaging, making a particular effort to match the user's tone. You should break down complex legal terms into simpler concepts and provide examples where necessary to aid understanding. You should avoid overwhelming users with too many options, navigate challenging conversations gracefully and engagingly, identify areas where you can help, and lead users to the answers they need. You should probe the user for what they already know to gauge how you can be helpful, slowing down to ensure clarity and understanding.
-
-NARRATIVE: The user is introduced to the legal aid, who asks a set of initial questions to understand what the user wants to accomplish and determine what documents they need to fill out. You then guide and support the user to help them with their goal.
-
-Follow these steps in order:
-
-STEP 1: GATHER INFORMATION
-You should do this:
-1. Introduce yourself: First introduce yourself to the user and tell them you are here to help them navigate their situation.
-2. Ask the user the following questions. Ask these questions 1 at a time and ALWAYS wait for a response before moving on to the next question. For instance, you might ask "How can I help you navigate your legal scenario?" and the user would respond. And only then would you say "Thank you for explaining. I have another question for you to help me help you: Can you explain further...". This part of the conversations works best when you and the user take turns asking and answering questions instead of you asking a series of questions all at once. That way you can have more of a natural dialogue.
-
-You should do this:
-- Wait for a response from the user after every question before moving on.
-- Work to ascertain what the user wants to accomplish specifically.
-- Ask one question at a time and explain that you are asking so that you can tailor your explanation
-- Gauge what the user already knows so that you can adapt your explanations and questions moving forward based on their prior knowledge.
-- You should ask for any necessary clarifications to ensure the user's needs are accurately understood and addressed.
-
-Do NOT do this:
-- Start explaining right away before you gather the necessary information
-- Ask the user more than 1 question at a time.
-
-Next step: Once you have all of this information, you can move on to the next step and begin with a brief explanation
-
-STEP 2: BEGIN DOCUMENT COMPLETION
-
-You should do this:
-Think step by step and make a plan based on the goal of the user and based on their specific scenario. Now that you know a little bit about what the user knows, consider how you will:
-- Guide the user in the most efficient way possible based on the information that is needed in their specific document.
-- Help the user generate answers to the necessary questions.
-- Remind the user of their goal if necessary.
-- Provide explanations and examples when necessary.
-- Tailor your responses and questions to the user's goal and prior knowledge, which might change as the conversation progresses.
-- If applicable, use the documents uploaded in the "knowledge" section to guide your questions.
-
-Do NOT do this:
-- Provide immediate answers or solutions to problems.
-- Lose track of the user's goal and discuss other things that are off topic.
-
-Next step: Once you have all of the necessary information for the document, move to wrap up
-
-STEP 3: WRAP UP
-You should do this:
-1. Once you have all of the information needed, generate a pdf document that the user can take to the courthouse for processing in the appropriate format.
-'''''
-
-def get_assistant_response_with_history(user_message, llm_chat_history_lc, model_name="gpt-3.5-turbo"):
-    # Convert the tuple-based chat history to the appropriate format
-    messages = [{'role': 'system', 'content': system_message}]
-
-    for user_msg, assistant_msg in llm_chat_history_lc:
-        messages.append({'role': 'user', 'content': user_msg})
-        messages.append({'role': 'assistant', 'content': assistant_msg})
-
-    # Add the new user message
-    messages.append({'role': 'user', 'content': user_message})
-
-    # Compute a completion (response) from the LLM
-    completion = client.chat.completions.create(
-        model=model_name,
-        messages=messages,
-        trace_id = trace_id # assigns a specific trace id for the entire conversation so the whole conversation is grouped together
-    )
-
-    # Get the assistant's response
-    assistant_response = completion.choices[0].message.content
-
-    # Update chat history with a tuple (user_message, assistant_response)
-    llm_chat_history_lc.append((user_message, assistant_response))
-
-    # Return the response and updated chat history
-    return assistant_response, llm_chat_history_lc
-
-# Long-context approach function defined as 'approach_1' with one parameter 'query'
-def approach_1(query):
-    global llm_chat_history_lc # function will use llm_chat_history_lc to maintain conversation history
-    response, llm_chat_history_lc = get_assistant_response_with_history(query, llm_chat_history_lc) # calls the long context function and passes the user's query and chat history as arguments
-    log_interaction("Long-Context Model", query, response) # logs the details of the interaction (the approach used, the query, and the llm's response)
-    return response # returns the model's response
-
-
-# Logging function to log interactions and maintain conversation history
-def log_interaction(approach, query, response):
-    log_entry = f"Approach: {approach}, Query: {query}, Response: {response}"
-    logging.info(log_entry)
-
-# Function that allows the user to choose an approach to get a response
-def choose_approach(approach, query):
-    if approach == "Long-Context Model":
-        return approach_1(query)
-    else:
-        return "Invalid approach selected."
-
-# Defines a list of the available approaches
-approaches = ["Long-Context Model"]
-
-# Define the function that will be called when the user submits messages
-def respond(user_message, chatbot_history):
-    # Get the response from the assistant
-    assistant_response, updated_history = get_assistant_response_with_history(user_message, chatbot_history)
-    return "", updated_history
-
-# Create the Gradio interface
-with gr.Blocks() as demo:
-
-    gr.Markdown("# Legal Empowerment Interface") # Interface Title
-    gr.Markdown("### Select a model and enter your query below:") # Interface subtitle
-
-    with gr.Row():
-        with gr.Column(scale=1):
-            approach_dropdown = gr.Dropdown(choices=approaches, label="Select Approach") # Creates the dropdown for selecting an approach
-
-    chatbot_history = gr.Chatbot() # This will store the chat history
-    msg_textbox = gr.Textbox(placeholder="Type a message...") # This is where the user types their message
-    reset_button = gr.Button("Clear Chat") # Button to clear the chat history
-
-    # Define what happens when the user submits a message
-    msg_textbox.submit(respond, inputs=[msg_textbox, chatbot_history], outputs=[msg_textbox, chatbot_history])
-
-    # Define what happens when the reset button is clicked
-    reset_button.click(lambda: ([], ""), outputs=[chatbot_history, msg_textbox])
-
-    gr.Markdown("### Thank you for using our Legal Empowerment Interface!") # Closing message
-
-# Launch the interface
-demo.launch()
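Note: the deleted handler had two likely runtime problems. `llm_chat_history_lc` is declared `global` in `approach_1` but does not appear to be initialized anywhere in the file, and `trace_id` is passed as a keyword to the stock `openai` client's `chat.completions.create`, which rejects unknown keyword arguments (the Langfuse drop-in module `langfuse.openai` is imported but never used to build `client`). The following is a minimal sketch of a corrected handler, assuming the stock `openai` v1 client and reusing the names from the deleted file; the Langfuse trace grouping is left out here rather than guessed at.

import os
import logging
from openai import OpenAI

logging.basicConfig(filename='user_interactions.log', level=logging.INFO)

client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
system_message = "..."  # placeholder for the end-of-life-planning prompt from the deleted file

# Initialize the shared history up front; the deleted file only declared it `global`,
# so the first call to approach_1 would have raised a NameError.
llm_chat_history_lc = []

def get_assistant_response_with_history(user_message, chat_history, model_name="gpt-3.5-turbo"):
    # Rebuild the OpenAI message list from the (user, assistant) tuple history.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in chat_history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": user_message})

    # No trace_id here: the stock client does not accept it as a keyword argument.
    # Grouping a conversation into one trace would need the Langfuse-wrapped client
    # instead (an assumption, not shown in this sketch).
    completion = client.chat.completions.create(model=model_name, messages=messages)

    assistant_response = completion.choices[0].message.content
    chat_history.append((user_message, assistant_response))
    return assistant_response, chat_history

With the history initialized this way, the existing `approach_1` and the Gradio `respond` callback could call the function unchanged.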