---
library_name: transformers
tags:
- unsloth
---

# Requirements

This model is a Mistral-based PEFT adapter, fine-tuned with Unsloth, for question answering over the Abandoned Properties (Taking Over and Management) Act, 1975. Install the dependencies below (Colab-style cell) before running inference.
```python
%%capture
import torch
major_version, minor_version = torch.cuda.get_device_capability()
# Install Unsloth separately since Colab ships torch 2.2.1, which breaks some packages
!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
if major_version >= 8:
    # Newer GPUs (Ampere/Hopper: RTX 30xx, RTX 40xx, A100, H100, L40) can use Flash Attention
    !pip install --no-deps packaging ninja einops flash-attn xformers trl peft accelerate bitsandbytes
else:
    # Older GPUs (V100, Tesla T4, RTX 20xx)
    !pip install --no-deps xformers trl peft accelerate bitsandbytes
!pip install gradio
```
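After installation, it can help to confirm that a GPU and the key libraries are visible before loading the model. This is a minimal sanity check, not part of the original setup:

```python
import torch
import transformers, peft, bitsandbytes

# Confirm a CUDA-capable GPU is visible; the 4-bit inference below requires one.
assert torch.cuda.is_available(), "No CUDA GPU detected"
print("GPU:", torch.cuda.get_device_name(0))
print("transformers", transformers.__version__,
      "| peft", peft.__version__,
      "| bitsandbytes", bitsandbytes.__version__)
```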


## Inference
```python
import torch
from transformers import AutoTokenizer
from peft import AutoPeftModelForCausalLM
import gradio as gr

# Load the model and tokenizer
model_id = "DisgustingOzil/MIstral_Pak_Law"
dtype = torch.float16  # Adjust as necessary
load_in_4bit = True

# Note: 4-bit (bitsandbytes) models cannot be moved with .to("cuda");
# let device_map place the quantized weights on the GPU instead.
model = AutoPeftModelForCausalLM.from_pretrained(
    model_id,
    load_in_4bit=load_in_4bit,
    torch_dtype=dtype,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

def generate_response(context, question):
    alpaca_prompt = f"""Given the context and a specific question, generate a comprehensive and detailed response that accurately addresses the query.

### Context:
{context}

### Question:
{question}

### Answer:
"""

    inputs = tokenizer(
        [alpaca_prompt], return_tensors="pt", padding=True, truncation=True, max_length=2048
    ).to("cuda")

    # Increase max_new_tokens if you want longer answers.
    outputs = model.generate(**inputs, max_new_tokens=64, use_cache=True)
    response = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    # Keep only the text generated after the "### Answer:" marker.
    answer = response[0].split("### Answer:")[-1].strip()
    return answer

# Define the Gradio interface
iface = gr.Interface(
    fn=generate_response,
    inputs=[gr.Textbox(label="Context"), gr.Textbox(label="Question")],
    outputs=gr.Textbox(label="Answer"),
    title="Abandoned Properties Act 1975 Query",
    description="Enter the context and a specific question to generate a response based on the Abandoned Properties (Taking Over and Management) Act 1975.",
)

# Run the app
iface.launch(debug=True)
```
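
To query the model without launching the Gradio UI, you can also call `generate_response` directly. The context and question below are illustrative placeholders, not material from the training data:

```python
# Hypothetical inputs for a quick smoke test; substitute real statute text.
context = (
    "The Abandoned Properties (Taking Over and Management) Act, 1975 provides for the "
    "taking over and management of certain abandoned properties."
)
question = "What does the Act provide for?"

print(generate_response(context, question))
```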