imperialwool committed
Commit 13b479a
1 Parent(s): 7eaa8db

Update gradio_app.py

Files changed (1): gradio_app.py +1 -2
gradio_app.py CHANGED

@@ -28,7 +28,7 @@ Powered by <a href="https://github.com/abetlen/llama-cpp-python">llama-cpp-python</a>
 with open('system.prompt', 'r', encoding='utf-8') as f:
     prompt = f.read()
 
-def generate_answer(request: str, max_tokens: int = 256, language: str = "en", custom_prompt: str = None):
+def generate_answer(request: str, max_tokens: int = 256, custom_prompt: str = None):
     logs = f"Request: {request}\nMax tokens: {max_tokens}\nLanguage: {language}\nCustom prompt: {custom_prompt}\n"
     try:
         maxTokens = max_tokens if 16 <= max_tokens <= 256 else 64
@@ -68,7 +68,6 @@ demo = gr.Interface(
     inputs=[
         gr.components.Textbox(label="Input"),
         gr.components.Number(value=256),
-        gr.components.Dropdown(label="Target Language", value="en", choices=["en"]+languages),
         gr.components.Textbox(label="Custom system prompt"),
     ],
     outputs=[
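
Note that the first hunk removes the language parameter from the signature but leaves the log line untouched, and that line still interpolates {language}; as committed, calling generate_answer raises a NameError. A minimal sketch of the function with the stale reference dropped, reconstructed only from the context lines in this hunk (everything past the token clamp is elided, not invented):

def generate_answer(request: str, max_tokens: int = 256, custom_prompt: str = None):
    # 'language' is gone from the signature, so the log line must drop it too;
    # as committed above, the f-string still references it and fails at call time.
    logs = f"Request: {request}\nMax tokens: {max_tokens}\nCustom prompt: {custom_prompt}\n"
    try:
        # Clamp the requested token budget to the supported 16-256 window,
        # falling back to 64 for out-of-range values (as in the hunk above).
        maxTokens = max_tokens if 16 <= max_tokens <= 256 else 64
        ...  # remainder of the function is outside this hunk
    except Exception as e:
        raise  # hypothetical: the real error handling is not shown in the diff

With that one extra change, the signature, the log line, and the removal of the "Target Language" Dropdown in the second hunk would all be consistent.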