sachit-sankhe committed
Commit
67f0ddf
1 Parent(s): 3a7082b

Update app.py

Files changed (1)
  1. app.py +17 -1
app.py CHANGED
@@ -1,7 +1,23 @@
 import gradio as gr
+import torch
+from peft import PeftModel, PeftConfig
+from transformers import AutoModelForCausalLM, AutoTokenizer
 
 def greet(name):
-    return "Hello " + name + "!!"
+    peft_model_id = "sachit-sankhe/openllama7b-lora-adapter2"
+    config = PeftConfig.from_pretrained(peft_model_id)
+    loaded_model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, return_dict=True, load_in_8bit=True, device_map='auto')
+    tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
+    # Load the LoRA adapter on top of the base model
+    loaded_model = PeftModel.from_pretrained(loaded_model, peft_model_id)
+    input_prompt = name
+    batch = tokenizer(f"###Human: {input_prompt}### Assistant: ", return_tensors='pt')
+
+    with torch.cuda.amp.autocast():
+        output_tokens = loaded_model.generate(**batch, max_new_tokens=300)
+
+    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)
+
 
 iface = gr.Interface(fn=greet, inputs="text", outputs="text")
 iface.launch(share=True)
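
As written in the diff, greet re-downloads and reloads the base model and LoRA adapter on every request. A minimal sketch of an alternative layout, using the same adapter id, API calls, and prompt format as the commit above but loading the model once at startup and reusing it per call:

import gradio as gr
import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the base model, tokenizer, and LoRA adapter once at startup
# (same adapter id as in the commit above).
peft_model_id = "sachit-sankhe/openllama7b-lora-adapter2"
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    return_dict=True,
    load_in_8bit=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)


def greet(name):
    # Reuse the already-loaded model; only tokenization and generation
    # happen per request.
    batch = tokenizer(f"###Human: {name}### Assistant: ", return_tensors="pt")
    with torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch, max_new_tokens=300)
    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)


iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch(share=True)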