Spaces:
Running
Running
Commit
·
0287610
1
Parent(s):
60d0ae9
instructions
Browse files- blocking_api.py +5 -0
- main.md +22 -0
- script.py +4 -1
blocking_api.py
CHANGED
@@ -113,6 +113,11 @@ def _run_server(port: int, share: bool = False):
|
|
113 |
server = ThreadingHTTPServer((address, port), Handler)
|
114 |
|
115 |
def on_start(public_url: str):
|
|
|
|
|
|
|
|
|
|
|
116 |
print(f'Starting non-streaming server at public url {public_url}/api')
|
117 |
|
118 |
if share:
|
|
|
113 |
server = ThreadingHTTPServer((address, port), Handler)
|
114 |
|
115 |
def on_start(public_url: str):
|
116 |
+
with open('main.md', 'r') as f:
|
117 |
+
text = f.read()
|
118 |
+
text = text.replace("[located in the logs of this container]", f"{public_url}/api")
|
119 |
+
with open('main.md', 'w') as f:
|
120 |
+
f.write(text)
|
121 |
print(f'Starting non-streaming server at public url {public_url}/api')
|
122 |
|
123 |
if share:
|
main.md
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# GPT4
|
2 |
+
|
3 |
+
I have created a project that runs GPT-4 through an open resource, "[mindsdb](https://cloud.mindsdb.com/)". It exposes an API like [oobabooga](https://github.com/oobabooga/text-generation-webui), except there is no streaming mode.
|
4 |
+
|
5 |
+
The project is compatible with SillyTavern and TavernAI. To launch, insert the link "[located in the logs of this container]" as the Blocking API URL in the Text Gen WebUI (ooba) interface.
|
6 |
+
|
7 |
+
---
|
8 |
+
## [Duplicate this space to skip the queue](https://huggingface.co/spaces/antonovmaxim/gpt4?duplicate=true)
|
9 |
+
|
10 |
+
To clone this project, you will need to create an account on [mindsdb](https://cloud.mindsdb.com/). Then run the following code there:
|
11 |
+
```sql
|
12 |
+
CREATE MODEL mindsdb.gpt4 -- mindsdb.[yourmodelname]
|
13 |
+
PREDICT response -- what we want from the model: a response
|
14 |
+
USING -- Using WHAT?
|
15 |
+
engine = 'openai', -- OpenAI Engine
|
16 |
+
max_tokens = 6000, -- max token size raised to 6k; you can also adjust temperature and other settings
|
17 |
+
-- api_key = demo instances already have one
|
18 |
+
model_name = 'gpt-4', -- you can also use text-davinci-003 or gpt-3.5-turbo
|
19 |
+
prompt_template = '{{text}}';
|
20 |
+
```
|
21 |
+
Then paste the login and password into the Space's secrets. The project will start.
|
22 |
+
|
script.py
CHANGED
@@ -1,5 +1,6 @@
|
|
1 |
import blocking_api
|
2 |
import gradio as gr
|
|
|
3 |
|
4 |
|
5 |
def setup():
|
@@ -7,6 +8,8 @@ def setup():
|
|
7 |
setup()
|
8 |
|
9 |
with gr.Blocks() as demo:
|
10 |
-
|
|
|
|
|
11 |
|
12 |
demo.launch()
|
|
|
1 |
import blocking_api
|
2 |
import gradio as gr
|
3 |
+
import time
|
4 |
|
5 |
|
6 |
def setup():
|
|
|
8 |
setup()
|
9 |
|
10 |
with gr.Blocks() as demo:
|
11 |
+
time.sleep(60)
|
12 |
+
with open('main.md', 'r') as f:
|
13 |
+
gr.Markdown(f.read())
|
14 |
|
15 |
demo.launch()
|