hikinegi committed on
Commit
9e7e2e2
1 Parent(s): cc993e0

Upload app.py

Files changed (1)
  1. app.py +73 -0
app.py ADDED
@@ -0,0 +1,73 @@
+ # -*- coding: utf-8 -*-
+ """orca_mini_3b_T4_GPU.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+     https://colab.research.google.com/#fileId=https%3A//huggingface.co/psmathur/orca_mini_3b/blob/main/orca_mini_3b_T4_GPU.ipynb
+ """
+
+ # Install dependencies (Colab shell commands)
+ !pip -q install transformers
+ !pip -q install sentencepiece
+ !pip -q install accelerate
+ !pip -q install gradio
+
+ import torch
+ from transformers import LlamaForCausalLM, LlamaTokenizer
+
+ # Hugging Face model_path
+ model_path = 'psmathur/orca_mini_3b'
+ tokenizer = LlamaTokenizer.from_pretrained(model_path)
+ # Load in float16 and let accelerate place the weights on the available GPU
+ model = LlamaForCausalLM.from_pretrained(
+     model_path, torch_dtype=torch.float16, device_map='auto',
+ )
+
+
+ # Generate text from the orca_mini prompt format
+ def predict(system, instruction, input=None):
+     # Build the prompt; the optional "### Input:" block carries extra context
+     if input:
+         prompt = f"### System:\n{system}\n\n### User:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
+     else:
+         prompt = f"### System:\n{system}\n\n### User:\n{instruction}\n\n### Response:\n"
+
+     # Tokenize the prompt and move the ids to the GPU
+     tokens = tokenizer.encode(prompt)
+     tokens = torch.LongTensor(tokens).unsqueeze(0)
+     tokens = tokens.to('cuda')
+
+     # Generation settings
+     instance = {'input_ids': tokens, 'top_p': 1.0, 'temperature': 0.7, 'generate_len': 1024, 'top_k': 50}
+
+     length = len(tokens[0])
+     with torch.no_grad():
+         rest = model.generate(
+             input_ids=tokens,
+             max_length=length + instance['generate_len'],
+             use_cache=True,
+             do_sample=True,
+             top_p=instance['top_p'],
+             temperature=instance['temperature'],
+             top_k=instance['top_k'],
+         )
+     # Keep only the newly generated tokens, dropping the prompt
+     output = rest[0][length:]
+     string = tokenizer.decode(output, skip_special_tokens=True)
+     return f'[!] Response: {string}'
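+
+ # With no extra "input", predict() renders a prompt of the form:
+ #
+ #   ### System:
+ #   {system}
+ #
+ #   ### User:
+ #   {instruction}
+ #
+ #   ### Response: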
+
+ import gradio as gr
+
+ # Define input components (gr.inputs is deprecated in current Gradio; use gr.Textbox)
+ prompt_input = gr.Textbox(label="System")
+ instruction_input = gr.Textbox(label="Instruction")
+ context_input = gr.Textbox(label="Context")
+
+ # Define output component
+ output_text = gr.Textbox(label="Output")
+
+ # Create the interface
+ interface = gr.Interface(fn=predict,
+                          inputs=[prompt_input, instruction_input, context_input],
+                          outputs=output_text)
+
+
+ interface.queue()
+
+ interface.launch(share=True)
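
For reference, a minimal sketch of calling predict() directly, outside the Gradio UI; the system and instruction strings here are hypothetical examples, not part of the commit:

    # Hypothetical direct call; assumes the model and tokenizer above are loaded on GPU
    response = predict(
        system="You are an AI assistant that follows instructions well.",
        instruction="Summarize what this app does in one sentence.",
    )
    print(response)  # -> "[!] Response: ..."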