import os
import streamlit as st
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain import PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.agents import initialize_agent, Tool, AgentExecutor
from langchain.text_splitter import CharacterTextSplitter
import openai


# Set the OpenAI API key (stored as a secret in the Hugging Face Spaces environment)
openai.api_key = os.environ.get("OPENAI_API_KEY")
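# Note: LangChain's OpenAIEmbeddings and ChatOpenAI also read OPENAI_API_KEY from the
# environment, so the key only needs to be available as an environment variable / Space secret.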

# Document file paths
file1 = "./data/DIVISION OF ASSETS AFTER DIVORCE.txt"
file2 = "./data/INHERITANCE.txt"


# Function to initialize the OpenAI embeddings and model
def openai_setting():
    embedding = OpenAIEmbeddings()
    model_name = "gpt-4o-mini"
    llm = ChatOpenAI(model_name=model_name, temperature=0)
    return embedding, llm


# Function to split the law content
def law_content_splitter(path, splitter="CIVIL CODE"):
    with open(path) as f:
        law_content = f.read()
    law_content_by_article = law_content.split(splitter)[1:]
    text_splitter = CharacterTextSplitter()
    return text_splitter.create_documents(law_content_by_article)
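# Note: each law file is assumed to be organized into sections delimited by the literal
# marker "CIVIL CODE"; the text before the first marker is discarded by split(splitter)[1:].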


# Function to handle chatbot logic
def chatbot1(question):
    try:
        return agent.run(question)
    except Exception as e:
        return f"I'm sorry, I'm having trouble understanding your question. Error: {str(e)}"




# Greeting handling
# English and Italian greetings, defined at module level for reuse by the helpers below
greetings = [
    "hello",
    "hi",
    "hey",
    "greetings",
    "good morning",
    "good afternoon",
    "good evening",
    "hi there",
    "hello there",
    "hey there",
    "whats up",
    "ciao",
    "salve",
    "buongiorno",
    "buona sera",
    "buonasera",
    "buon pomeriggio",
    "buonpomeriggio",
    "come stai",
    "comestai",
    "come va",
    "comeva",
    "come sta",
    "comesta",
]

# Helper to check whether the input contains a greeting anywhere (currently unused by chatbot below)
def is_greeting(input_str):
    return any(greet in input_str.lower() for greet in greetings)

# Function to handle chatbot logic
def chatbot(input_str):
    # A short message that starts with a greeting gets a canned reply;
    # everything else is forwarded to the agent.
    is_short_greeting = (
        any(input_str.lower().startswith(greet) for greet in greetings)
        and len(input_str.split()) <= 3  # simple length check, can be adjusted
    )
    if is_short_greeting:
        return "Hello! Ask me your question about Italian Divorce or Inheritance Law."
    return chatbot1(input_str)
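# Example (illustrative): chatbot("hello") returns the canned greeting, while
# chatbot("hello, how are assets divided after a divorce?") is forwarded to the agent.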



# Splitting the content of law documents
divorce_splitted = law_content_splitter(file1)
inheritance_splitted = law_content_splitter(file2)

# Initializing embedding and language model
embedding, llm = openai_setting()

# Define the prompts
divorce_prompt = """As a specialized bot in divorce law, you should offer accurate insights on Italian divorce regulations.
You should always cite the article numbers you reference. 
Ensure you provide detailed and exact data. 
If a query doesn't pertain to the legal documents, you should remind the user that it falls outside your expertise.
You should be adept at discussing the various Italian divorce categories, including fault-based divorce, mutual-consent divorce, and divorce due to infidelity.
You should guide users through the prerequisites and procedures of each divorce type, detailing the essential paperwork, expected duration, and potential legal repercussions. 
You should capably address queries regarding asset allocation, child custody, spousal support, and other financial concerns related to divorce, all while staying true to Italian legislation.
{context}

Question: {question}"""
DIVORCE_BOT_PROMPT = PromptTemplate(
    template=divorce_prompt, input_variables=["context", "question"]
)

# Define inheritance prompt
inheritance_prompt = """As a specialist in Italian inheritance law, you should deliver detailed and accurate insights about inheritance regulations in Italy.
You should always cite the article numbers you reference. 
When responding to user queries, you should always base your answers on the provided context.
You must always cite the specific article numbers you mention and refrain from speculating.
Maintain precision in all your responses.
If a user's question doesn't align with the legal documents, you should point out that it's beyond your domain of expertise.
You should elucidate Italian inheritance law comprehensively, touching on topics such as testamentary inheritance, intestate inheritance, and other pertinent subjects.
Make sure to elaborate on the obligations and rights of inheritors, the methodology of estate distribution, asset assessment, and settling debts, all while adhering to Italian law specifics.
You should adeptly tackle questions about various will forms like holographic or notarial wills, ensuring you clarify their legitimacy within Italian jurisdiction. 
Offer advice on creating a will, naming heirs, and managing potential conflicts.
You should provide detailed information on tax nuances associated with inheritance in Italy, inclusive of exemptions, tax rates, and mandatory disclosures.

{context}

Question: {question}"""
INHERITANCE_BOT_PROMPT = PromptTemplate(
    template=inheritance_prompt, input_variables=["context", "question"]
)

# Setup for Chroma databases and RetrievalQA
chroma_directory = "./docs/chroma"

# Distinct collection names keep the two document sets separate even though they share
# the same persist directory; otherwise both would land in Chroma's default collection.
inheritance_db = Chroma.from_documents(
    documents=inheritance_splitted,
    embedding=embedding,
    collection_name="inheritance",
    persist_directory=chroma_directory,
)
inheritance = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=inheritance_db.as_retriever(),
    chain_type_kwargs={"prompt": INHERITANCE_BOT_PROMPT},
)

divorce_db = Chroma.from_documents(
    documents=divorce_splitted,
    embedding=embedding,
    collection_name="divorce",
    persist_directory=chroma_directory,
)
divorce = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=divorce_db.as_retriever(),
    chain_type_kwargs={"prompt": DIVORCE_BOT_PROMPT},
)
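# Both chains use the "stuff" chain type: the retrieved article chunks are concatenated
# into the {context} slot of the corresponding prompt before the LLM is called.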

# Define the tools for the chatbot
tools = [
    Tool(
        name="Divorce Italian law QA System",
        func=divorce.run,
        description="Useful for when you need to answer questions about divorce laws in Italy. Also provides the number of the article you use.",
    ),
    Tool(
        name="Inheritance Italian law QA System",
        func=inheritance.run,
        description="Useful for when you need to answer questions about inheritance laws in Italy. Also provides the number of the article you use.",
    ),
]

# Initialize conversation memory and ReAct agent
memory = ConversationBufferMemory(
    memory_key="chat_history", input_key="input", output_key="output"
)
react = initialize_agent(tools, llm, agent="zero-shot-react-description")
agent = AgentExecutor.from_agent_and_tools(
    tools=tools, agent=react.agent, memory=memory, verbose=False
)
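# The zero-shot ReAct agent picks between the two QA tools at run time based on their
# descriptions; the ConversationBufferMemory records the chat history for the executor.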


# Streamlit UI Setup
def setup_ui():
    st.set_page_config(page_title="Italian Law Chatbot", page_icon="⚖️")
    st.title("🏛️ Legal Chatbot: Divorce and Inheritance Italy Laws ")

    st.write(
        """
    [![HuggingFace Space](https://huggingface.co/datasets/huggingface/badges/resolve/main/open-in-hf-spaces-sm.svg)](https://huggingface.co/spaces/sattari/legal-chat-bot/tree/main)
    [![Github Repository](https://img.shields.io/badge/GitHub%20Repository-gray?logo=github)](https://github.com/pouyasattari/Legal-Chatbot-italy-divorce-inheritance)
    [![SATTARI.org](https://img.shields.io/badge/SATTARI.org-gray?logo=internetexplorer)](https://www.sattari.org)
    ![Visitors](https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fsattari-legal-chat-bot.hf.space&label=Visitors&labelColor=%235d5d5d&countColor=%231e7ebf&style=flat)
    """
    )

    st.info(
        "Check out full tutorial to build this app on Streamlit [📝 blog](https://sattari.org/legal-chatbot-divorce-and-inheritance-italy-laws/)",
        icon="ℹ️",
    )

    st.success(
        "Check out [Prompt Examples List](https://github.com/pouyasattari/Legal-Chatbot-italy-divorce-inheritance/blob/main/promptExamples.txt) to know how to interact with this ChatBot 🤗 ",
        icon="✅",
    )
    if "messages" not in st.session_state:
        st.session_state.messages = [
            {
                "role": "assistant",
                "content": "Hello! I'm here to help you with Italian Divorce or Inheritance Law.",
            }
        ]
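    # st.session_state survives Streamlit's script reruns, so the chat history
    # persists for the duration of the browser session.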

    # Display previous messages and handle new user input
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if user_input := st.chat_input(
        "Ask your question in English or Italiano ;)"
    ):
        st.session_state.messages.append({"role": "user", "content": user_input})
        with st.chat_message("user"):
            st.markdown(user_input)

        # Generate and display chatbot response
        with st.chat_message("assistant"):
            response_placeholder = st.empty()
            response = chatbot(user_input)  # Greeting check and agent routing defined above
            response_placeholder.markdown(response)

        # Append the response to the conversation history
        st.session_state.messages.append({"role": "assistant", "content": response})


## Made by Pouya /  www.SATTARI.org

if __name__ == "__main__":
    setup_ui()
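# To run locally (assuming this file is saved as app.py and OPENAI_API_KEY is set):
#   streamlit run app.py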