Monster committed on
Commit
be5354c
1 Parent(s): 31700ad

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +143 -0
app.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ from typing import Iterable
3
+ import gradio as gr
4
+ from gradio.themes.base import Base
5
+ from gradio.themes.utils import colors, fonts, sizes
6
+
7
+ from llama_cpp import Llama
8
+ from huggingface_hub import hf_hub_download
9
+
10
# Fetch the 4-bit-quantized Alpaca-LoRA 7B GGML weights into the working
# directory (hf_hub_download is a no-op if the file is already present).
hf_hub_download(repo_id="Pi3141/alpaca-lora-7B-ggml", filename="ggml-model-q4_1.bin", local_dir=".")
# Load the model via llama.cpp bindings.
# NOTE(review): n_threads=2 presumably matches the host's CPU allotment — tune per hardware.
llm = Llama(model_path="./ggml-model-q4_1.bin", n_threads=2)
12
+
13
+
14
# Alpaca-style prompt template: the user's text replaces {} under
# "### Instruction:" and the model completes after "### Response:".
ins = '''### Instruction:
{}
### Response:
'''
18
+
19
# Monochrome-based Gradio theme.
# NOTE(review): `theme` appears unused — the gr.Blocks context below is built
# with `seafoam` instead; confirm before removing.
theme = gr.themes.Monochrome(
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
    radius_size=gr.themes.sizes.radius_sm,
    font=[gr.themes.GoogleFont("Open Sans"), "ui-sans-serif", "system-ui", "sans-serif"],
)
26
+
27
+
28
+
29
+
30
def generate(instruction):
    """Stream a completion for *instruction* from the local Alpaca model.

    Formats the instruction with the Alpaca prompt template `ins` and yields
    the cumulative response text after every streamed chunk, so the UI can
    render partial output progressively.

    Yields:
        str: the response accumulated so far (grows with each chunk).
    """
    result = ""
    # Stop sequences keep the model from hallucinating a follow-up turn.
    for chunk in llm(ins.format(instruction), stop=['### Instruction:', '### End'], stream=True):
        result += chunk['choices'][0]['text']
        yield result
43
+
44
+
45
# Canned prompts surfaced to the user via gr.Examples below.
examples = [
    "Instead of making a peanut butter and jelly sandwich, what else could I combine peanut butter with in a sandwich? Give five ideas",
    "How do I make a campfire?",
    "Explain to me the difference between nuclear fission and fusion.",
    "I'm selling my Nikon D-750, write a short blurb for my ad."
]
51
+
52
def process_example(args):
    """Run `generate` to completion and return only the final answer.

    Used as the fn for gr.Examples caching, which needs one finished value
    rather than a stream.
    """
    result = ""
    # Drain the generator; each yield is the cumulative text, so the last
    # bound value is the complete response. Initializing `result` avoids the
    # NameError the original risked when the generator yielded nothing.
    for result in generate(args):
        pass
    return result
56
+
57
# Hide Gradio's "generating" overlay while tokens stream in.
css = ".generating {visibility: hidden}"
58
+
59
# Based on the gradio theming guide and borrowed from https://huggingface.co/spaces/shivi/dolly-v2-demo
class SeafoamCustom(Base):
    """Custom Gradio theme: emerald/blue palette, Quicksand font, and
    gradient-filled primary buttons with drop shadows."""

    def __init__(
        self,
        *,
        primary_hue: colors.Color | str = colors.emerald,
        secondary_hue: colors.Color | str = colors.blue,
        neutral_hue: colors.Color | str = colors.blue,
        spacing_size: sizes.Size | str = sizes.spacing_md,
        radius_size: sizes.Size | str = sizes.radius_md,
        font: fonts.Font
        | str
        | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("Quicksand"),
            "ui-sans-serif",
            "sans-serif",
        ),
        font_mono: fonts.Font
        | str
        | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("IBM Plex Mono"),
            "ui-monospace",
            "monospace",
        ),
    ):
        # Let the base theme wire up hues, sizing, and fonts first.
        super().__init__(
            primary_hue=primary_hue,
            secondary_hue=secondary_hue,
            neutral_hue=neutral_hue,
            spacing_size=spacing_size,
            radius_size=radius_size,
            font=font,
            font_mono=font_mono,
        )
        # Then override individual CSS variables; "*name" tokens reference
        # other theme variables (Gradio theme variable syntax).
        super().set(
            button_primary_background_fill="linear-gradient(90deg, *primary_300, *secondary_400)",
            button_primary_background_fill_hover="linear-gradient(90deg, *primary_200, *secondary_300)",
            button_primary_text_color="white",
            button_primary_background_fill_dark="linear-gradient(90deg, *primary_600, *secondary_800)",
            block_shadow="*shadow_drop_lg",
            button_shadow="*shadow_drop_lg",
            input_background_fill="zinc",
            input_border_color="*secondary_300",
            input_shadow="*shadow_drop",
            input_shadow_focus="*shadow_drop_lg",
        )
105
+
106
+
107
# Instantiate the custom theme once at import time.
seafoam = SeafoamCustom()


# Build the UI: a title/description block, a question textbox, a streaming
# Markdown answer area, a Generate button, and cached example prompts.
with gr.Blocks(theme=seafoam, analytics_enabled=False, css=css) as demo:
    with gr.Column():
        gr.Markdown(
            """ ## Alpaca-LoRa

            is a 7B-parameter LLaMA model finetuned to follow instructions. It is trained on the Stanford Alpaca dataset and makes use of the Huggingface LLaMA implementation.

            Type in the box below and click the button to generate answers to your most pressing questions!

            """
        )

        with gr.Row():
            with gr.Column(scale=3):
                instruction = gr.Textbox(placeholder="Enter your question here", label="Question", elem_id="q-input")

                with gr.Box():
                    gr.Markdown("**Answer**")
                    output = gr.Markdown(elem_id="q-output")
                submit = gr.Button("Generate", variant="primary")
                # Example clicks run process_example (non-streaming) so the
                # results can be cached; cache_examples=True precomputes them.
                gr.Examples(
                    examples=examples,
                    inputs=[instruction],
                    cache_examples=True,
                    fn=process_example,
                    outputs=[output],
                )

    # Wire both the button click and Enter-in-textbox to the streaming generator.
    submit.click(generate, inputs=[instruction], outputs=[output])
    instruction.submit(generate, inputs=[instruction], outputs=[output])

# Single-request queue (streaming requires the queue); debug=True keeps the
# process attached and prints errors to the console.
demo.queue(concurrency_count=1).launch(debug=True)