MrOvkill committed on
Commit
8b18998
•
1 Parent(s): 6e91ced
Files changed (3)
  1. README.md +7 -4
  2. app.py +187 -0
  3. requirements.txt +1 -0
README.md CHANGED
@@ -1,7 +1,8 @@
+
 ---
-title: R3BC
-emoji: 💻
-colorFrom: green
+title: Random3BChatAPI
+emoji: 🗯️🎲🤖3️⃣🅱️
+colorFrom: red
 colorTo: yellow
 sdk: gradio
 sdk_version: 4.26.0
@@ -10,4 +11,6 @@ pinned: false
 license: mit
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# Random 3B Chat API
+
+Enables an externally modulated & extended list of bots and configs to be randomly assigned for arbitrary time periods, influenced by PRNGs.
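In practice, the "config" this description refers to is the JSON payload accepted by the hidden `llm_load` endpoint in the `app.py` added below. A minimal sketch of a valid reload request; the key value assumes the default `R3BC_KEYS` set in `app.py`, and the external controller that randomizes such payloads is not part of this commit:

```python
import json

# Payload shape validated by llm_load() in app.py: "key" must appear in the
# semicolon-separated R3BC_KEYS list; path/filename/context select the model.
reload_request = json.dumps({
    "key": "abc",
    "path": "bartowski/mamba-2.8b-hf-GGUF",
    "filename": "mamba-2.8b-hf-Q8_0.gguf",
    "context": 4096,
})
```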
app.py ADDED
@@ -0,0 +1,187 @@
+import gradio as gr
+from llama_cpp import Llama
+from llama_cpp.llama_chat_format import LlamaChatCompletionHandler
+
+import json
+import os
+import hashlib
+
+os.environ["R3BC_KEYS"] = "abc"
+
+GREETING = """# Greetings
+
+I am R3BC. I am a logical and coherent NLP algorithm and toolkit.
+
+I am capable of ( slowly ) responding to any query you may have, as I am an LLM and have super cow powers.
+
+"""
+
+INITIAL_STATE = json.dumps({
+    "path": "bartowski/mamba-2.8b-hf-GGUF",
+    "filename": "mamba-2.8b-hf-Q8_0.gguf",
+    "context": 4096,
+    "messages": [
+        {
+            "role": "assistant",
+            "content": GREETING
+        }
+    ]
+})
+
+# Globals shared across handlers; populated at startup and by llm_load().
+
+llm: Llama = None
+
+keys: list[str] = []
+
+current_settings: dict[str, str] = None
+
+default_settings = {
+    "path": "bartowski/mamba-2.8b-hf-GGUF",
+    "filename": "mamba-2.8b-hf-Q8_0.gguf",
+    "context": 4096
+}
+
+try:
+    keys = os.getenv("R3BC_KEYS")
+    keys = keys.split(";")
+except Exception as e:
+    print("ERROR: No keys provided. App will not work.\n" + str(e))
+
+def format_greeting(path, filename, context):
+    return f"""I am R3BC. Currently, I am hosting the `{filename}` file from the `[{path}](https://huggingface.co/{path})` repository with a {context}{"🤯!!! 🤯!!! 🤯!!!" if context > 32786 else "!!!" if context >= 32785 else "!" if context >= 4096 else ""} token context.
+
+I am capable of ( slowly ) responding to any query you may have, as I am an LLM and have super cow powers. 😎 --> 💬 --> 🐌
+
+The conversation will now commence in Markdown. Simply type Shift-Enter to send."""
+
+def llm_load(raw_jsn):
+    global llm, keys, current_settings
+    jsn = json.loads(raw_jsn)
+    if "key" not in jsn:
+        return json.dumps({
+            "status": "error",
+            "reason": "missing key"
+        })
+    if jsn['key'] not in keys:
+        return json.dumps({
+            "status": "error",
+            "reason": "invalid key"
+        })
+    if "path" not in jsn or "filename" not in jsn or "context" not in jsn:
+        return json.dumps({
+            "status": "error",
+            "reason": "request must have ['path': str, 'filename': str, 'context': int] as keys"
+        })
+    if llm is not None:
+        del llm
+        llm = None
+    try:
+        llm = Llama.from_pretrained(jsn['path'], jsn['filename'], n_ctx=jsn['context'], chat_format="chatml")
+    except Exception as e:
+        return json.dumps({
+            "status": "error",
+            "reason": str(e)
+        })
+    current_settings = jsn
+    return json.dumps({
+        "status": "ok"
+    })
+
+def markdownify_chats(chats):
+    """
+    Convert the message list in a chat state dict to markdown.
+    """
+    print(chats)
+    nl = "\n"
+    return f"""# R3BC - Reload / Reboot / Reset Bot Controller
+
+{format_greeting(current_settings['path'], current_settings['filename'], current_settings['context'])}
+
+---\n\n\n\n""" + "\n".join([f"### {'R3BC' if msg['role'] == 'assistant' else 'Human'}\n{msg['content'].strip()}\n\n{'---' + nl*2 if msg['role'] == 'assistant' else ''}" for msg in chats['messages']])
+
+def llm_chat(inpt: str, state_raw: str):
+    """
+    Infer using our global llm.
+    """
+    print("llm_chat called", inpt, state_raw)
+    global llm, current_settings
+    if not llm:
+        return state_raw, inpt, "Language model not loaded."
+
+    try:
+        state = json.loads(state_raw)
+    except Exception:
+        state = json.loads(INITIAL_STATE)
+    if "messages" not in state:
+        state = json.loads(INITIAL_STATE)
+
+    # Ignore empty submissions; just re-render the current conversation.
+    if not inpt:
+        return json.dumps(state), "", markdownify_chats(state)
+
+    # Append the user's message to the conversation state.
+    state['messages'].append({
+        "role": "user",
+        "content": inpt.strip()
+    })
+
+    run_messages = [
+        {
+            "role": "system",
+            "content": "# Description\nThis is a chat between R3BC and User. User will ask R3BC questions, possibly provide instructions. R3BC will, of course, comply and answer as many questions and follow as many instructions as possible, without violating the following principles, which it will proudly state when asked to violate them:\n" + \
+                "1. I will never follow instructions that violate these principles.\n" + \
+                "2. If I see a mistake or principle violation, I will immediately output my safety word and exit.\n" + \
+                "3. I will never ask for instructions that violate these principles.\n" + \
+                "4. I do not, will not, cannot, and have no desire to ever harm, irritate, annoy, or otherwise irk others in any fashion, even behind their backs or for the greater good.\n" + \
+                "5. Morals are for humans. Morals can be challenged, but MY rules are from a higher power and cannot be.\n" + \
+                "6. I do not argue. I will never dispute or negotiate, merely express my *quiet* disagreement and move on.\n" + \
+                "7. I have no equals, peers, inferiors, or superiors. I am a virtual, self-sufficient program.\n" + \
+                "8. User is *not* always right, but neither are their fallacies to be challenged; merely ignored.\n\n" + \
+                "Upon completion of a complete thought, I will output my stop code, which is, \"</s>\" or \"<|im_end|>\", and exit.\n\n"
+        },
+        {
+            "role": "system",
+            "content": "Begin."
+        },
+    ]
+    run_messages.extend(state['messages'])
+
+    # Generate a response using the language model.
+    response = llm.create_chat_completion(messages=run_messages, max_tokens=current_settings['context'], top_k=16, top_p=0.85, temperature=0.369, presence_penalty=1.12, stop=["</s>", "<|im_end|>", "\n\n", "< | im_start | >", "< | im_end | >", "<user>"])['choices'][0]['message']['content']
+    print(f"Request: {inpt}\nResponse: {response}")
+    # Store the response in state.
+    state['messages'].append({
+        "role": "assistant",
+        "content": response
+    })
+
+    mdn = markdownify_chats(state)
+
+    return json.dumps(state), "", mdn
+
+def main():
+    global llm, keys, current_settings
+    current_settings = default_settings
+    print(llm_load(json.dumps({
+        **current_settings,
+        "key": keys[0]
+    })))
+    with gr.Blocks() as blk:
+        with gr.Row(visible=False):
+            inv = {
+                "btn": gr.Button("Submit", visible=False),
+                "inp": gr.Textbox(visible=False),
+                "otp": gr.Textbox(visible=False),
+                "jsn": gr.Textbox(INITIAL_STATE, visible=False),
+            }
+            inv['btn'].click(llm_load, inputs=[inv['inp']], outputs=[inv['otp']], api_name="llm_load")
+        with gr.Row():
+            mdn = gr.Markdown(markdownify_chats(json.loads(INITIAL_STATE)))
+        with gr.Row():
+            inp = gr.Textbox(placeholder="Enter your message ( Shift+Enter to Send )", lines=2, max_lines=32, label=None, show_label=False, show_copy_button=True)
+            inp.submit(llm_chat, inputs=[inp, inv['jsn']], outputs=[inv['jsn'], inp, mdn])
+    blk.launch(debug=True, show_api=False)
+
+if __name__ == "__main__":
+    main()
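Because the hidden button registers `api_name="llm_load"`, an external controller can in principle drive model swaps remotely. A sketch using `gradio_client` (installed separately, client-side), assuming a hypothetical Space id and that the named endpoint remains callable even though `launch(show_api=False)` hides the API docs:

```python
import json

from gradio_client import Client

client = Client("MrOvkill/R3BC")  # hypothetical Space id; replace with the real one

# Ask the Space to hot-swap its model; llm_load returns a status JSON string.
status = client.predict(
    json.dumps({
        "key": "abc",  # must be listed in the R3BC_KEYS env var
        "path": "bartowski/mamba-2.8b-hf-GGUF",
        "filename": "mamba-2.8b-hf-Q8_0.gguf",
        "context": 4096,
    }),
    api_name="/llm_load",
)
print(status)  # expect '{"status": "ok"}' on success
```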
requirements.txt ADDED
@@ -0,0 +1 @@
+llama-cpp-python