erivandev Monster committed on
Commit
32933fb
0 Parent(s):

Duplicate from Monster/GPT4ALL


Co-authored-by: Monster <Monster@users.noreply.huggingface.co>

Files changed (5)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +143 -0
  4. ggjt-model.bin +3 -0
  5. requirements.txt +3 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Gpt4all
+ emoji: 🦀
+ colorFrom: gray
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 3.24.1
+ app_file: app.py
+ pinned: false
+ duplicated_from: Monster/GPT4ALL
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,143 @@
+ from __future__ import annotations
+ from typing import Iterable
+ import gradio as gr
+ from gradio.themes.base import Base
+ from gradio.themes.utils import colors, fonts, sizes
+
+ from llama_cpp import Llama
+ #from huggingface_hub import hf_hub_download
+
+ #hf_hub_download(repo_id="LLukas22/gpt4all-lora-quantized-ggjt", filename="ggjt-model.bin", local_dir=".")
+ llm = Llama(model_path="./ggjt-model.bin")
+
+
+ ins = '''### Instruction:
+ {}
+ ### Response:
+ '''
+
+ theme = gr.themes.Monochrome(
+     primary_hue="indigo",
+     secondary_hue="blue",
+     neutral_hue="slate",
+     radius_size=gr.themes.sizes.radius_sm,
+     font=[gr.themes.GoogleFont("Open Sans"), "ui-sans-serif", "system-ui", "sans-serif"],
+ )
+
+
+
+
+ # def generate(instruction):
+ #     response = llm(ins.format(instruction))
+ #     response = response['choices'][0]['text']
+ #     result = ""
+ #     for word in response.split(" "):
+ #         result += word + " "
+ #         yield result
+
+ def generate(instruction):
+     result = ""
+     for x in llm(ins.format(instruction), stop=['### Instruction:', '### End'], stream=True):
+         result += x['choices'][0]['text']
+         yield result
+
+
+ examples = [
+     "Instead of making a peanut butter and jelly sandwich, what else could I combine peanut butter with in a sandwich? Give five ideas",
+     "How do I make a campfire?",
+     "Explain to me the difference between nuclear fission and fusion.",
+     "I'm selling my Nikon D-750, write a short blurb for my ad."
+ ]
+
+ def process_example(args):
+     for x in generate(args):
+         pass
+     return x
+
+ css = ".generating {visibility: hidden}"
+
+ # Based on the gradio theming guide and borrowed from https://huggingface.co/spaces/shivi/dolly-v2-demo
+ class SeafoamCustom(Base):
+     def __init__(
+         self,
+         *,
+         primary_hue: colors.Color | str = colors.emerald,
+         secondary_hue: colors.Color | str = colors.blue,
+         neutral_hue: colors.Color | str = colors.blue,
+         spacing_size: sizes.Size | str = sizes.spacing_md,
+         radius_size: sizes.Size | str = sizes.radius_md,
+         font: fonts.Font
+         | str
+         | Iterable[fonts.Font | str] = (
+             fonts.GoogleFont("Quicksand"),
+             "ui-sans-serif",
+             "sans-serif",
+         ),
+         font_mono: fonts.Font
+         | str
+         | Iterable[fonts.Font | str] = (
+             fonts.GoogleFont("IBM Plex Mono"),
+             "ui-monospace",
+             "monospace",
+         ),
+     ):
+         super().__init__(
+             primary_hue=primary_hue,
+             secondary_hue=secondary_hue,
+             neutral_hue=neutral_hue,
+             spacing_size=spacing_size,
+             radius_size=radius_size,
+             font=font,
+             font_mono=font_mono,
+         )
+         super().set(
+             button_primary_background_fill="linear-gradient(90deg, *primary_300, *secondary_400)",
+             button_primary_background_fill_hover="linear-gradient(90deg, *primary_200, *secondary_300)",
+             button_primary_text_color="white",
+             button_primary_background_fill_dark="linear-gradient(90deg, *primary_600, *secondary_800)",
+             block_shadow="*shadow_drop_lg",
+             button_shadow="*shadow_drop_lg",
+             input_background_fill="zinc",
+             input_border_color="*secondary_300",
+             input_shadow="*shadow_drop",
+             input_shadow_focus="*shadow_drop_lg",
+         )
+
+
+ seafoam = SeafoamCustom()
+
+
+ with gr.Blocks(theme=seafoam, analytics_enabled=False, css=css) as demo:
+     with gr.Column():
+         gr.Markdown(
+             """ ## GPT4ALL
+
+             7b quantized 4bit (q4_0)
+
+             Type in the box below and click the button to generate answers to your most pressing questions!
+
+             """
+         )
+
+         with gr.Row():
+             with gr.Column(scale=3):
+                 instruction = gr.Textbox(placeholder="Enter your question here", label="Question", elem_id="q-input")
+
+                 with gr.Box():
+                     gr.Markdown("**Answer**")
+                     output = gr.Markdown(elem_id="q-output")
+                 submit = gr.Button("Generate", variant="primary")
+                 gr.Examples(
+                     examples=examples,
+                     inputs=[instruction],
+                     cache_examples=False,
+                     fn=process_example,
+                     outputs=[output],
+                 )
+
+
+
+     submit.click(generate, inputs=[instruction], outputs=[output])
+     instruction.submit(generate, inputs=[instruction], outputs=[output])
+
+ demo.queue(concurrency_count=1).launch(debug=True)
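
Usage note: a minimal sketch of exercising the same streaming path as generate() above directly from Python, assuming ggjt-model.bin is already present in the working directory (model path, prompt template, and stop strings mirror app.py; the example prompt is taken from the examples list):

from llama_cpp import Llama

# Assumes ggjt-model.bin sits next to the script, the same path app.py loads.
llm = Llama(model_path="./ggjt-model.bin")

# Same instruction template used by app.py.
ins = '''### Instruction:
{}
### Response:
'''

# Accumulate the completion chunk by chunk, as generate() does.
result = ""
for chunk in llm(ins.format("How do I make a campfire?"),
                 stop=['### Instruction:', '### End'], stream=True):
    result += chunk['choices'][0]['text']
print(result)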
ggjt-model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de314c5ee155ac40a03ca3b3be85ba2b02aef9e9f083c411c0b4490689dd047e
+ size 4212864640
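
ggjt-model.bin is committed as a Git LFS pointer (about 4.2 GB). If the weights are not pulled together with the repo, they can be fetched the way the commented-out line in app.py suggests; a sketch, assuming the LLukas22/gpt4all-lora-quantized-ggjt repo still serves the file:

from huggingface_hub import hf_hub_download

# Download ggjt-model.bin into the current directory, matching the
# ./ggjt-model.bin path app.py expects. repo_id and filename are taken
# from the commented-out call in app.py.
hf_hub_download(repo_id="LLukas22/gpt4all-lora-quantized-ggjt",
                filename="ggjt-model.bin", local_dir=".")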
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ huggingface-hub
+ llama-cpp-python==0.1.35
+ gradio==3.26.0