ysharma HF staff commited on
Commit
7fd3a74
1 Parent(s): c86c2f3

added small writeup

Browse files
Files changed (1) hide show
  1. app.py +2 -0
app.py CHANGED
@@ -14,6 +14,7 @@ DESCRIPTION = '# Llama-2 7B chat'
14
  if not torch.cuda.is_available():
15
  DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
16
 
 
17
 
18
  def clear_and_save_textbox(message: str) -> tuple[str, str]:
19
  return '', message
@@ -60,6 +61,7 @@ def fn(
60
 
61
  with gr.Blocks(css='style.css') as demo:
62
  gr.Markdown(DESCRIPTION)
 
63
  gr.DuplicateButton(value='Duplicate Space for private use',
64
  elem_id='duplicate-button',
65
  visible=os.getenv('SHOW_DUPLICATE_BUTTON') == '1')
 
14
  if not torch.cuda.is_available():
15
  DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
16
 
17
+ WRITEUP = """This Space demonstrates model [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta, running on the latest transformers release. Read more about the Llama 2 release on Hugging Face in our [Blog](https://huggingface.co/blog/llama2). To have your own dedicated endpoint, you can [deploy it on Inference Endpoints](https://ui.endpoints.huggingface.co/) or duplicate the Space and assign a GPU to it. We also have the [Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) demo running on Spaces. """
18
 
19
  def clear_and_save_textbox(message: str) -> tuple[str, str]:
20
  return '', message
 
61
 
62
  with gr.Blocks(css='style.css') as demo:
63
  gr.Markdown(DESCRIPTION)
64
+ gr.Markdown(WRITEUP)
65
  gr.DuplicateButton(value='Duplicate Space for private use',
66
  elem_id='duplicate-button',
67
  visible=os.getenv('SHOW_DUPLICATE_BUTTON') == '1')