imperialwool committed on
Commit 486de6b
1 Parent(s): 5927241

Update gradio_app.py

Files changed (1)
  1. gradio_app.py +7 -4
gradio_app.py CHANGED
@@ -26,15 +26,18 @@ Powered by <a href="https://github.com/abetlen/llama-cpp-python">llama-cpp-python</a>
 # Loading prompt
 with open('system.prompt', 'r', encoding='utf-8') as f:
     prompt = f.read()
+with open('system.message', 'r', encoding='utf-8') as f:
+    system_message = f.read()
 
 def generate_answer(request: str, max_tokens: int = 256, custom_prompt: str = None):
     logs = f"Request: {request}\nMax tokens: {max_tokens}\nCustom prompt: {custom_prompt}\n"
     try:
         maxTokens = max_tokens if 16 <= max_tokens <= 256 else 64
-        if isinstance(custom_prompt, str) and len(custom_prompt.strip()) > 1 and custom_prompt.strip() not in ['', None, ' ']:
-            userPrompt = custom_prompt.replace("{prompt}", request)
-        else:
-            userPrompt = prompt.replace("{prompt}", request)
+        userPrompt = prompt.replace("{prompt}", request)
+        userPrompt = userPrompt.replace(
+            "{system_message}",
+            custom_prompt if isinstance(custom_prompt, str) and len(custom_prompt.strip()) > 1 and custom_prompt.strip() not in ['', None, ' '] else system_message
+        )
         logs += f"\nFinal prompt: {userPrompt}\n"
     except:
         return "Not enough data! Check that you passed all needed data.", logs