sandz7 commited on
Commit
f606aad
β€’
1 Parent(s): b6eee7b

update UI text

Browse files
Files changed (1) hide show
  1. app.py +7 -13
app.py CHANGED
@@ -13,7 +13,7 @@ HF_TOKEN = os.environ.get("HF_TOKEN", None)
13
  DESCRIPTION = '''
14
  <div>
15
  <h1 style="text-align: center;">Loki πŸ‘οΈ</h1>
16
- <p>This uses Llama 3 and GPT-4o as generation, both of these make the final generation. <a href="https://huggingface.co/meta-llama/Meta-Llama-3-8B"><b>Llama3-8b</b></a> and <a href="https://platform.openai.com/docs/models/gpt-4o"><b>GPT-4o</b></a></p>
17
  </div>
18
  '''
19
 
@@ -27,19 +27,13 @@ terminators = [
27
 
28
  @spaces.GPU(duration=120)
29
  def chat_llama3_8b(message: str,
30
- history: list,
31
- temperature: float,
32
- max_new_tokens: int
33
- ) -> str:
34
  """
35
- Generate a streaming response using the llama3-8b model.
36
- Args:
37
- message (str): The input message.
38
- history (list): The conversation history used by ChatInterface.
39
- temperature (float): The temperature for generating the response.
40
- max_new_tokens (int): The maximum number of new tokens to generate.
41
- Returns:
42
- str: The generated response.
43
  """
44
  conversation = []
45
  for user, assistant in history:
 
13
  DESCRIPTION = '''
14
  <div>
15
  <h1 style="text-align: center;">Loki πŸ‘οΈ</h1>
16
+ <p>This uses an open source Large Language Model called <a href="https://huggingface.co/meta-llama/Meta-Llama-3-8B"><b>Llama3-8b</b></a></p>
17
  </div>
18
  '''
19
 
 
27
 
28
  @spaces.GPU(duration=120)
29
  def chat_llama3_8b(message: str,
30
+ history: list,
31
+ temperature: float,
32
+ max_new_tokens: int
33
+ ) -> str:
34
  """
35
+ Takes the input message, converts it into tokens, generates
+ with the token ids, and streams the decoded text back out.
 
 
 
 
 
 
37
  """
38
  conversation = []
39
  for user, assistant in history: