oscarwang2 committed
Commit ad19bf8
Parent(s): 886b081

Update app.py

Files changed (1)
  1. app.py +46 -20
app.py CHANGED
@@ -3,10 +3,10 @@ import os
 import gradio as gr
 import threading
 import time
-from gradio_client import Client
+from groq import Groq
 
-# Initialize Gradio client
-client = Client("Nymbo/Llama-3.1-405B-Instruct")
+# Initialize Groq client
+client = Groq()
 
 # Constants
 MAX_SIZE = 1.1 * 1024 * 1024 * 1024 # 1.1GB in bytes
@@ -37,28 +37,52 @@ def generate_and_save_data():
     while True:
         try:
             # Generate a prompt
-            prompt_result = client.predict(
-                message="give me a single prompt to prompt an ai model, simulating what users could want from you. ensure that it is diverse and high quality. for each, choose a random writing style (though it has to be a common one), random length and random clarity of the prompt. ensure that it is a single prompt, and just the prompt itself, nothing else. eg, don't close the prompt in quotation marks or say Here is a single prompt that meets your requirements or anything similar to that",
-                system_message="",
-                max_tokens=1024,
+            completion = client.chat.completions.create(
+                model="gemma2-9b-it",
+                messages=[
+                    {
+                        "role": "user",
+                        "content": "give me a single prompt to prompt an ai model, simulating what users could want from you. ensure that it is diverse and high quality. for each, choose a random writing style (though it has to be a common one), random length and random clarity of the prompt. ensure that it is a single prompt, and just the prompt itself, nothing else. eg, don't close the prompt in quotation marks or say Here is a single prompt that meets your requirements or anything similar to that"
+                    }
+                ],
                 temperature=1,
+                max_tokens=1024,
                 top_p=1,
-                api_name="/chat"
+                stream=True,
+                stop=None,
             )
-            prompt = prompt_result['message']
-            prompt_tokens = len(prompt.split())
+
+            prompt = ""
+            prompt_tokens = 0
+            for chunk in completion:
+                content = chunk.choices[0].delta.content
+                if content:
+                    prompt += content
+                    prompt_tokens += len(content.split())
 
             # Use the generated prompt to query the model again
-            response_result = client.predict(
-                message=prompt,
-                system_message="",
-                max_tokens=5000,
+            second_completion = client.chat.completions.create(
+                model="gemma2-9b-it",
+                messages=[
+                    {
+                        "role": "user",
+                        "content": prompt
+                    }
+                ],
                 temperature=1,
+                max_tokens=5000,
                 top_p=1,
-                api_name="/chat"
+                stream=True,
+                stop=None,
             )
-            response = response_result['message']
-            response_tokens = len(response.split())
+
+            response = ""
+            response_tokens = 0
+            for chunk in second_completion:
+                content = chunk.choices[0].delta.content
+                if content:
+                    response += content
+                    response_tokens += len(content.split())
 
             # Update the combined token count
             combined_tokens += (prompt_tokens + response_tokens)
@@ -76,10 +100,12 @@ def generate_and_save_data():
                 current_file = os.path.join(DATA_DIRECTORY, f'data{file_index}.csv')
                 file_paths.append(current_file)
                 # Create the new file with headers
-                data.to_csv(current_file, index=False)
+                with open(current_file, 'w') as f:
+                    data.to_csv(f, header=True, index=False)
             else:
                 # Append data to the current file
-                data.to_csv(current_file, mode='a', header=False, index=False)
+                with open(current_file, 'a') as f:
+                    data.to_csv(f, header=False, index=False)
 
             # Wait for the next update interval
             time.sleep(UPDATE_INTERVAL)
@@ -137,4 +163,4 @@ with gr.Blocks() as app:
     token_refresh = gr.Button("Refresh Token Count")
     token_refresh.click(update_token_display, outputs=token_display)
 
-app.launch(show_error=True)
+app.launch()
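The core of the commit is the swap from a hosted gradio_client Space to the Groq SDK. A minimal standalone sketch of the new call path, assuming the groq package is installed and a GROQ_API_KEY environment variable is set (the bare Groq() constructor reads the key from the environment; nothing in the diff passes one explicitly):

from groq import Groq

# Groq() with no arguments picks up GROQ_API_KEY from the environment.
client = Groq()

# stream=True yields an iterator of chunks instead of a single response
# object; each chunk carries an incremental delta, mirroring the OpenAI
# streaming shape.
stream = client.chat.completions.create(
    model="gemma2-9b-it",
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
    temperature=1,
    max_tokens=64,
    top_p=1,
    stream=True,
)

text = ""
for chunk in stream:
    content = chunk.choices[0].delta.content
    if content:  # the final delta can be None, hence the same guard in app.py
        text += content
print(text)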
 
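One caveat about the new accumulation loops: prompt_tokens and response_tokens are whitespace-delimited word counts, not model tokens, and counting per chunk can double-count a word that is split across two streamed chunks. A small worked comparison (hypothetical chunks, not from the commit):

# Per-chunk counting, as in the diff:
chunks = ["hel", "lo world"]          # "hello world" split mid-word
per_chunk = sum(len(c.split()) for c in chunks)
print(per_chunk)                      # 3 -- "hel", "lo", "world"

# Counting once over the accumulated text avoids the off-by-one:
joined = "".join(chunks)
print(len(joined.split()))            # 2 -- "hello", "world"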
 
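The CSV change wraps to_csv in explicit open() handles rather than passing a path with mode='a'; pandas accepts an open file object, and the header flag is what distinguishes the create path from the append path. A minimal sketch with a hypothetical two-column frame standing in for the app's data:

import pandas as pd

data = pd.DataFrame([{"prompt": "p1", "response": "r1"}])
current_file = "data1.csv"
creating = True  # in app.py this is decided by the MAX_SIZE rotation check

if creating:
    # First write to a fresh file: include the header row.
    # newline="" is what the pandas docs recommend when handing to_csv an
    # open text handle; the diff omits it, which can leave blank lines
    # between rows on Windows.
    with open(current_file, "w", newline="") as f:
        data.to_csv(f, header=True, index=False)
else:
    # Subsequent writes: append rows only, no repeated header.
    with open(current_file, "a", newline="") as f:
        data.to_csv(f, header=False, index=False)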
 
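The diff never shows how generate_and_save_data() is started; given the import threading context at the top of the file, the usual pattern is a daemon thread kicked off before the blocking app.launch() call. A hypothetical sketch, not taken from the commit:

import threading
import time

def generate_and_save_data():
    # Stand-in for the generation loop shown in the hunks above.
    while True:
        time.sleep(5)

# daemon=True lets the process exit cleanly when the Gradio server stops.
worker = threading.Thread(target=generate_and_save_data, daemon=True)
worker.start()

As for the last hunk, dropping show_error=True simply reverts launch() to its defaults, so handler errors are no longer surfaced as alert modals in the browser.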