Update app.py
app.py CHANGED
@@ -1,9 +1,21 @@
 import requests
 import streamlit as st
-
+import torch
+from transformers import pipeline
 
-# Initialize
-
+# Initialize Llama 3.2 model
+@st.cache_resource
+def load_model():
+    model_id = "meta-llama/Llama-3.2-1B"
+    pipe = pipeline(
+        "text-generation",
+        model=model_id,
+        torch_dtype=torch.bfloat16,
+        device_map="auto"
+    )
+    return pipe
+
+pipe = load_model()
 
 def call_llama_for_response(clauses_data):
     prompt = "As an AI assistant specializing in contract analysis, draft a professional and courteous response to a contract drafter based on the following clause analyses and decisions:\n\n"
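Note on this hunk: wrapping `load_model` in `@st.cache_resource` makes Streamlit build the pipeline once per process instead of reloading multi-gigabyte weights on every script rerun. Also, `meta-llama/Llama-3.2-1B` is a gated base checkpoint; since the removed code used chat-style messages and `<|eot_id|>` stops, the instruction-tuned `meta-llama/Llama-3.2-1B-Instruct` may be the intended model. A quick smoke test of the cached pipeline, as a sketch (not part of the commit; assumes Hugging Face access to the gated repo, e.g. via `huggingface-cli login`):

    # Hypothetical smoke test: greedy-decode a few tokens from the cached pipeline.
    out = pipe("Dear counterparty,", max_new_tokens=20, do_sample=False)
    print(out[0]["generated_text"])  # prompt plus up to 20 generated tokens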
@@ -19,18 +31,8 @@ def call_llama_for_response(clauses_data):
 
     prompt += "Draft a response that addresses each clause, explaining our position on acceptance, rejection, or negotiation. The tone should be professional, courteous, and constructive."
 
-    response =
-
-        messages=[{"role": "user", "content": prompt}],
-        max_tokens=2048,
-        temperature=0.3,
-        top_p=0.8,
-        top_k=50,
-        repetition_penalty=1,
-        stop=["<|eot_id|>", "<|eom_id|>"],
-        stream=False
-    )
-    return response.choices[0].message.content
+    response = pipe(prompt, max_new_tokens=500, do_sample=True, temperature=0.7)
+    return response[0]['generated_text']
 
 st.title("Contract Negotiation Assistant")
 
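One caveat on the replacement call: by default the transformers text-generation pipeline returns the prompt concatenated with the completion in `generated_text`, so the app will render the whole instruction block ahead of the drafted reply. A sketch of the same return path with the echo suppressed, using only the standard `return_full_text` pipeline argument (not part of the commit):

    # Inside call_llama_for_response, after the prompt is assembled:
    response = pipe(
        prompt,
        max_new_tokens=500,
        do_sample=True,
        temperature=0.7,
        return_full_text=False,  # return only the completion, not prompt + completion
    )
    return response[0]["generated_text"]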