Stepan Zadolya committed on
Commit a03974c
1 Parent(s): 5b2b4b8
Files changed (1)
  1. app.py +148 -4
app.py CHANGED
@@ -6,10 +6,154 @@
 # About:
 # --------------------------------
 
+import sys
+import torch
+from peft import PeftModel
+import transformers
 import gradio as gr
 
-def greet(name):
-    return "Hello " + name + "!!"
-
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-iface.launch()
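+# Fail fast if this transformers install predates native LLaMA support.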
+assert (
+    "LlamaTokenizer" in transformers._import_structure["models.llama"]
+), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
+from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
+
+
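+# SHARE_GRADIO controls whether launch() publishes a public gradio.live link;
+# LOAD_8BIT would load the base weights in 8-bit via bitsandbytes.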
+SHARE_GRADIO = True
+LOAD_8BIT = False
+
+BASE_MODEL = "mrzlab630/weights_Llama_7b"
+LORA_WEIGHTS = "mrzlab630/lora-alpaca-trading-candles"
+# BASE_MODEL = "decapoda-research/llama-7b-hf"
+
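+# The tokenizer comes from the base checkpoint; the LoRA adapter does not
+# change the vocabulary.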
+tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)
+
+if torch.cuda.is_available():
+    device = "cuda"
+else:
+    device = "cpu"
+
+try:
+    if torch.backends.mps.is_available():
+        device = "mps"
+except Exception:  # older torch builds do not expose the MPS backend
+    pass
+
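+# Load the base model, then wrap it with the LoRA adapter. Precision and
+# placement differ per backend: fp16 on CUDA/MPS, default dtype on CPU.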
+if device == "cuda":
+    model = LlamaForCausalLM.from_pretrained(
+        BASE_MODEL,
+        load_in_8bit=LOAD_8BIT,
+        torch_dtype=torch.float16,
+        device_map="auto",
+    )
+    model = PeftModel.from_pretrained(
+        model,
+        LORA_WEIGHTS,
+        torch_dtype=torch.float16,
+    )
+elif device == "mps":
+    model = LlamaForCausalLM.from_pretrained(
+        BASE_MODEL,
+        device_map={"": device},
+        torch_dtype=torch.float16,
+    )
+    model = PeftModel.from_pretrained(
+        model,
+        LORA_WEIGHTS,
+        device_map={"": device},
+        torch_dtype=torch.float16,
+    )
+else:
+    model = LlamaForCausalLM.from_pretrained(
+        BASE_MODEL, device_map={"": device}, low_cpu_mem_usage=True
+    )
+    model = PeftModel.from_pretrained(
+        model,
+        LORA_WEIGHTS,
+        device_map={"": device},
+    )
+
+
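+# Alpaca-style prompt template the adapter was trained on; evaluate() later
+# splits on the "### Response:" marker to extract the answer.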
+def generate_prompt(instruction, input=None):
+    if input:
+        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
+
+### Instruction:
+{instruction}
+
+### Input:
+{input}
+
+### Response:"""
+    else:
+        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+{instruction}
+
+### Response:"""
+
+if not LOAD_8BIT:
+    model.half()  # seems to fix bugs for some users.
+
+model.eval()
+if torch.__version__ >= "2" and sys.platform != "win32":
+    model = torch.compile(model)
+
+
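+# Full inference path: build the prompt, run beam-search generation, and
+# return only the text that follows "### Response:".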
+def evaluate(
+    instruction,
+    input=None,
+    temperature=0.1,
+    top_p=0.75,
+    top_k=40,
+    num_beams=4,
+    max_new_tokens=128,
+    **kwargs,
+):
+    prompt = generate_prompt(instruction, input)
+    inputs = tokenizer(prompt, return_tensors="pt")
+    input_ids = inputs["input_ids"].to(device)
+    generation_config = GenerationConfig(
+        temperature=temperature,
+        top_p=top_p,
+        top_k=top_k,
+        num_beams=num_beams,
+        **kwargs,
+    )
+    with torch.no_grad():
+        generation_output = model.generate(
+            input_ids=input_ids,
+            generation_config=generation_config,
+            return_dict_in_generate=True,
+            output_scores=True,
+            max_new_tokens=max_new_tokens,
+        )
+    s = generation_output.sequences[0]
+    output = tokenizer.decode(s)
+    return output.split("### Response:")[1].strip()
+
+
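+# Gradio UI: each input component below maps positionally to one evaluate()
+# argument, in the order declared in the function signature.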
+gr.Interface(
+    fn=evaluate,
+    inputs=[
+        gr.components.Textbox(
+            lines=2, label="Instruction", placeholder="Tell me about alpacas."
+        ),
+        gr.components.Textbox(lines=2, label="Input", placeholder="none"),
+        gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
+        gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
+        gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
+        gr.components.Slider(minimum=1, maximum=4, step=1, value=4, label="Beams"),
+        gr.components.Slider(
+            minimum=1, maximum=2000, step=1, value=128, label="Max tokens"
+        ),
+    ],
+    outputs=[
+        gr.components.Textbox(
+            lines=5,
+            label="Output",
+        )
+    ],
+    title="💹 🕯 Alpaca-LoRA-Trading-Candles",
+    description="Alpaca-LoRA-Trading-Candles is a 7B-parameter LLaMA model fine-tuned to follow instructions. It was trained on the [trading candles dataset](https://huggingface.co/datasets/mrzlab630/trading-candles) and uses the Hugging Face LLaMA implementation. For more information, visit the [project page](https://huggingface.co/mrzlab630/lora-alpaca-trading-candles).",
+).launch(server_name="0.0.0.0", share=SHARE_GRADIO)