DisgustingOzil
committed
Commit: e2994b7
Parent(s): d8cfa9b
Update README.md
README.md
CHANGED
@@ -29,42 +29,57 @@ pass
 ## Inference
 ```python
 
-max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
-dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
-load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
 import torch
-from peft import AutoPeftModelForCausalLM
 from transformers import AutoTokenizer
-
+from peft import AutoPeftModelForCausalLM
+import gradio as gr
+
+# Load the model and tokenizer
+model_id = "DisgustingOzil/MIstral_Pak_Law"
+dtype = torch.float16 # Adjust as necessary
+load_in_4bit = True
+
 model = AutoPeftModelForCausalLM.from_pretrained(
-
-
-
-
-)
+    model_id,
+    load_in_4bit=load_in_4bit,
+    torch_dtype=dtype,
+).to("cuda")
 tokenizer = AutoTokenizer.from_pretrained(model_id)
-
-
+
+def generate_response(context, question):
+    alpaca_prompt = f"""Given the context and a specific question, generate a comprehensive and detailed response that accurately addresses the query.
 
 ### Context:
-{}
+{context}
 
 ### Question:
-{}
+{question}
 
 ### Answer:
-
-
-
-
-
-
-
-)
-
-
-
-
+"""
+
+    inputs = tokenizer(
+        [alpaca_prompt], return_tensors="pt", padding=True, truncation=True, max_length=2048
+    ).to("cuda")
+
+    outputs = model.generate(**inputs, max_new_tokens=64, use_cache=True)
+    response = tokenizer.batch_decode(outputs, skip_special_tokens=True)
+    summary_start_index = response[0].find("### Answer:")
+    summary_text = response[0][summary_start_index:].replace("### Answer:", "").strip()
+    return summary_text
+    # return response[0]
+
+# Define the Gradio interface
+iface = gr.Interface(
+    fn=generate_response,
+    inputs=[gr.Textbox(label="Context"), gr.Textbox(label="Question")],
+    outputs=gr.Textbox(label="Answer"),
+    title="Abandoned Properties Act 1975 Query",
+    description="Enter the context and a specific question to generate a response based on the Abandoned Properties (Taking Over and Management) Act 1975.",
+)
+
+# Run the app
+iface.launch(debug=True)
 
 
 
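One caveat on the added loading code: on recent transformers/bitsandbytes versions, passing `load_in_4bit` directly to `from_pretrained` is deprecated in favor of a `BitsAndBytesConfig`, and calling `.to("cuda")` on a 4-bit-quantized model raises an error, since bitsandbytes handles device placement itself. A minimal sketch of an equivalent load under those assumptions (not part of the commit; it assumes bitsandbytes and accelerate are installed):

```python
# Equivalent 4-bit load via BitsAndBytesConfig, with the explicit
# .to("cuda") dropped: device_map="auto" places the quantized weights
# on the GPU instead. Sketch only, not from the model card.
import torch
from transformers import AutoTokenizer, BitsAndBytesConfig
from peft import AutoPeftModelForCausalLM

model_id = "DisgustingOzil/MIstral_Pak_Law"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,  # matches dtype in the README
)

model = AutoPeftModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",  # replaces .to("cuda") for quantized models
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
```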
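For a quick smoke test of the new `generate_response` helper without launching the Gradio app, it can also be called directly. The context and question strings below are illustrative placeholders, not text from the model card:

```python
# Direct call to generate_response(), assuming the README code above
# has already run (model, tokenizer, and the function are defined).
# Both strings are hypothetical examples for illustration.
context = (
    "The Abandoned Properties (Taking Over and Management) Act 1975 "
    "provides for the taking over and management of certain abandoned "
    "properties."
)
question = "What does the Act provide for?"

print(generate_response(context, question))
```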