nikshep01 committed
Commit d3fd0d6 · verified · 1 Parent(s): dd12f46

Create app.py

Files changed (1):
  1. app.py +53 -0
app.py ADDED
@@ -0,0 +1,53 @@
+ from transformers import pipeline
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from llama_cpp import Llama
+ from datasets import load_metric
+
+ # 1. Quick test through the high-level text-generation pipeline.
+ pipe = pipeline("text-generation", model="varma007ut/Indian_Legal_Assitant")
+
+ prompt = "Summarize the key points of the Indian Contract Act, 1872:"
+ result = pipe(prompt, max_length=200)
+ print(result[0]['generated_text'])
+
+ # 2. The same model through AutoTokenizer/AutoModelForCausalLM for finer control.
+ tokenizer = AutoTokenizer.from_pretrained("varma007ut/Indian_Legal_Assitant")
+ model = AutoModelForCausalLM.from_pretrained("varma007ut/Indian_Legal_Assitant")
+
+ prompt = "What are the fundamental rights in the Indian Constitution?"
+ inputs = tokenizer(prompt, return_tensors="pt")
+ outputs = model.generate(**inputs, max_length=200)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+
+ # 3. Quantized GGUF inference through llama-cpp-python.
+ llm = Llama.from_pretrained(
+     repo_id="varma007ut/Indian_Legal_Assitant",
+     filename="ggml-model-q4_0.gguf",  # Replace with the actual GGUF filename if different
+ )
+
+ response = llm.create_chat_completion(
+     messages=[
+         {
+             "role": "user",
+             "content": "Explain the concept of judicial review in India."
+         }
+     ]
+ )
+
+ print(response['choices'][0]['message']['content'])
+
+ # 4. BLEU evaluation of a generated answer against a reference answer.
+ # Note: load_metric is deprecated in recent datasets releases; the
+ # evaluate library (evaluate.load("bleu")) is its replacement.
+ bleu = load_metric("bleu")
+ encoded_input = tokenizer(prompt, return_tensors="pt")  # reuse the earlier prompt
+ output_ids = model.generate(**encoded_input, max_length=200)
+ prediction = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+ reference = "A reference answer goes here."  # placeholder reference text
+ # This metric expects token lists: one list per prediction, and a list of
+ # (possibly multiple) token lists per reference.
+ results = bleu.compute(
+     predictions=[prediction.split()],
+     references=[[reference.split()]],
+ )
+ print(results)
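
For reference, a minimal sketch of the same BLEU check using the newer evaluate library, which replaces the deprecated datasets.load_metric; the prediction and reference strings below are placeholders, not actual output from this model:

import evaluate

bleu = evaluate.load("bleu")
# Unlike load_metric("bleu"), this metric accepts raw strings and tokenizes
# internally: one prediction string, and a list of acceptable references
# per prediction.
results = bleu.compute(
    predictions=["Judicial review allows courts to strike down unconstitutional laws."],
    references=[["Judicial review allows courts to strike down unconstitutional laws."]],
)
print(results["bleu"])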