oscarwang2
committed
Commit ad19bf8
Parent(s): 886b081
Update app.py
app.py CHANGED
@@ -3,10 +3,10 @@ import os
 import gradio as gr
 import threading
 import time
-from
+from groq import Groq
 
-# Initialize
-client =
+# Initialize Groq client
+client = Groq()
 
 # Constants
 MAX_SIZE = 1.1 * 1024 * 1024 * 1024  # 1.1GB in bytes
@@ -37,28 +37,52 @@ def generate_and_save_data():
     while True:
         try:
             # Generate a prompt
-
-
-
-
+            completion = client.chat.completions.create(
+                model="gemma2-9b-it",
+                messages=[
+                    {
+                        "role": "user",
+                        "content": "give me a single prompt to prompt an ai model, simulating what users could want from you. ensure that it is diverse and high quality. for each, choose a random writing style (though it has to be a common one), random length and random clarity of the prompt. ensure that it is a single prompt, and just the prompt itself, nothing else. eg, don't close the prompt in quotation marks or say Here is a single prompt that meets your requirements or anything similar to that"
+                    }
+                ],
                 temperature=1,
+                max_tokens=1024,
                 top_p=1,
-
+                stream=True,
+                stop=None,
             )
-
-
+
+            prompt = ""
+            prompt_tokens = 0
+            for chunk in completion:
+                content = chunk.choices[0].delta.content
+                if content:
+                    prompt += content
+                    prompt_tokens += len(content.split())
 
             # Use the generated prompt to query the model again
-
-
-
-
+            second_completion = client.chat.completions.create(
+                model="gemma2-9b-it",
+                messages=[
+                    {
+                        "role": "user",
+                        "content": prompt
+                    }
+                ],
                 temperature=1,
+                max_tokens=5000,
                 top_p=1,
-
+                stream=True,
+                stop=None,
             )
-
-
+
+            response = ""
+            response_tokens = 0
+            for chunk in second_completion:
+                content = chunk.choices[0].delta.content
+                if content:
+                    response += content
+                    response_tokens += len(content.split())
 
             # Update the combined token count
             combined_tokens += (prompt_tokens + response_tokens)
@@ -76,10 +100,12 @@ def generate_and_save_data():
             current_file = os.path.join(DATA_DIRECTORY, f'data{file_index}.csv')
             file_paths.append(current_file)
             # Create the new file with headers
-
+            with open(current_file, 'w') as f:
+                data.to_csv(f, header=True, index=False)
         else:
             # Append data to the current file
-
+            with open(current_file, 'a') as f:
+                data.to_csv(f, header=False, index=False)
 
         # Wait for the next update interval
         time.sleep(UPDATE_INTERVAL)
@@ -137,4 +163,4 @@ with gr.Blocks() as app:
     token_refresh = gr.Button("Refresh Token Count")
     token_refresh.click(update_token_display, outputs=token_display)
 
-app.launch(
+app.launch()
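
Note: the streaming call this commit introduces can be exercised on its own. A minimal sketch, assuming GROQ_API_KEY is set in the environment (the Groq() constructor reads it by default); the counter is a whitespace word count, a rough proxy rather than the model's own tokenizer:

from groq import Groq

client = Groq()  # reads GROQ_API_KEY from the environment

completion = client.chat.completions.create(
    model="gemma2-9b-it",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    temperature=1,
    max_tokens=1024,
    top_p=1,
    stream=True,
    stop=None,
)

text = ""
word_count = 0
for chunk in completion:
    content = chunk.choices[0].delta.content
    if content:  # role-only first chunk and the final chunk typically carry no content
        text += content
        word_count += len(content.split())  # whitespace split, not true model tokens

print(text, f"(~{word_count} words)")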
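
The header-once CSV rotation in the second part of the diff isolates to a small helper. A sketch under the same assumptions (append_rows is an illustrative name, not from the app; pandas assumed available):

import os
import pandas as pd

def append_rows(path, data):
    # Write headers only when creating the file; append headerless rows afterwards,
    # so a rotated file parses as a single well-formed CSV.
    if not os.path.exists(path):
        with open(path, 'w') as f:
            data.to_csv(f, header=True, index=False)
    else:
        with open(path, 'a') as f:
            data.to_csv(f, header=False, index=False)

append_rows('data1.csv', pd.DataFrame({'prompt': ['hi'], 'response': ['hello']}))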
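
Finally, the Blocks wiring that the last hunk leaves in place, with app.launch() now called without arguments, reduces to this self-contained refresh demo (the module-level counter and the Textbox component choice are stand-ins, not the app's own):

import gradio as gr

combined_tokens = 0  # stand-in for the counter the background thread updates

def update_token_display():
    return f"{combined_tokens} tokens generated"

with gr.Blocks() as app:
    token_display = gr.Textbox(label="Combined Token Count")
    token_refresh = gr.Button("Refresh Token Count")
    token_refresh.click(update_token_display, outputs=token_display)

app.launch()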