acecalisto3 committed
Commit 2bf0c81 · verified · 1 Parent(s): 14cbb01

Update app.py

Files changed (1):
  app.py  +73 -24
app.py CHANGED
@@ -10,6 +10,8 @@ from selenium.webdriver.chrome.service import Service
 from selenium.webdriver.chrome.options import Options
 from webdriver_manager.chrome import ChromeDriverManager
 from huggingface_hub import InferenceClient
+import random
+import yaml
 
 # Configure logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -98,9 +100,70 @@ def handle_input(storage_location, url1, url2, scrape_interval, content_type):
     monitor_urls(storage_location, url1, url2, scrape_interval, content_type)
     return TASK_PROMPT.format(task=current_task, history="\n".join(history))
 
-# Define the chat response function
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+# Load custom prompts
+try:
+    with open('custom_prompts.yaml', 'r') as fp:
+        custom_prompts = yaml.safe_load(fp)
+except FileNotFoundError:
+    custom_prompts = {
+        "WEB_DEV": "",
+        "AI_SYSTEM_PROMPT": "",
+        "PYTHON_CODE_DEV": "",
+        "CODE_GENERATION": "",
+        "CODE_INTERPRETATION": "",
+        "CODE_TRANSLATION": "",
+        "CODE_IMPLEMENTATION": ""
+    }
+
+# Define the Mistral inference client
+client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+
+VERBOSE = True
+MAX_HISTORY = 125
+
+def format_prompt(message, history):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+
+agents = [
+    "WEB_DEV",
+    "AI_SYSTEM_PROMPT",
+    "PYTHON_CODE_DEV",
+    "CODE_GENERATION",
+    "CODE_INTERPRETATION",
+    "CODE_TRANSLATION",
+    "CODE_IMPLEMENTATION"
+]
+
+def generate(
+    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.7,
+):
+    seed = random.randint(1, 1111111111111111)
+    agent = custom_prompts[agent_name]
+
+    system_prompt = agent if sys_prompt == "" else sys_prompt
+    temperature = max(float(temperature), 1e-2)
+    top_p = float(top_p)
+
+    generate_kwargs = dict(
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        do_sample=True,
+        seed=seed,
+    )
+
+    formatted_prompt = format_prompt(f"{system_prompt}\n\n{prompt}", history)
+    output = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, return_full_text=False)
+
+    return output
 
+# Define the chat response function
 def respond(
     message,
     history,
@@ -109,29 +172,15 @@ def respond(
     temperature,
     top_p,
 ):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
+    response = generate(
+        prompt=message,
+        history=history,
+        sys_prompt=system_message,
         temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
+        max_new_tokens=max_tokens,
+        top_p=top_p
+    )
+    return response
 
 # Create Gradio interface
 def chat_interface(message, system_message, max_tokens, temperature, top_p, storage_location, url1, url2, scrape_interval, content_type):
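
A note on the new config loading: the loader only falls back to empty prompts when custom_prompts.yaml is missing entirely. An empty file still takes the yaml.safe_load path, and safe_load returns None for an empty file, so custom_prompts[agent_name] would then raise a TypeError. A hedged sketch of producing a file that satisfies the loader; only the seven keys come from the commit, the prompt texts are invented placeholders:

# Hypothetical: writes a custom_prompts.yaml compatible with the loader above.
# Only the seven keys are taken from the commit; all values are placeholders.
import yaml

sample_prompts = {
    "WEB_DEV": "You are a web development assistant.",
    "AI_SYSTEM_PROMPT": "You draft system prompts for AI agents.",
    "PYTHON_CODE_DEV": "You are a Python development assistant.",
    "CODE_GENERATION": "You generate code from natural-language specs.",
    "CODE_INTERPRETATION": "You explain what given code does.",
    "CODE_TRANSLATION": "You translate code between languages.",
    "CODE_IMPLEMENTATION": "You implement described algorithms.",
}

with open("custom_prompts.yaml", "w") as fp:
    yaml.safe_dump(sample_prompts, fp)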
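The new format_prompt helper serializes the chat history into the [INST] instruction format that Mixtral-8x7B-Instruct expects. A self-contained sketch of its output, using the function body exactly as added in this commit and an invented one-turn history:

# format_prompt as added in this commit; the example history is invented.
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

history = [("What does this Space do?", "It monitors two URLs for changes.")]
print(format_prompt("Summarize the last scrape.", history))
# <s>[INST] What does this Space do? [/INST] It monitors two URLs for changes.</s> [INST] Summarize the last scrape. [/INST]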
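One behavior change worth flagging: respond previously streamed tokens via chat_completion(..., stream=True) and yield, while the new implementation returns a single string from a non-streaming text_generation call, so any Gradio wiring that iterated over respond will now see the whole reply at once. A hypothetical smoke test for the new path, assuming app.py is importable and a Hugging Face token with Inference API access is configured in the environment:

# Hypothetical smoke test; assumes app.py imports cleanly and HF
# Inference API credentials are available in the environment.
from app import generate

reply = generate(
    prompt="List the files this Space writes to.",
    history=[],
    agent_name="PYTHON_CODE_DEV",  # one of the seven agents from the commit
    temperature=0.7,
    max_new_tokens=128,
)
print(reply)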