Tonic committed on
Commit
27d5e20
1 Parent(s): 4694c68

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -7,7 +7,7 @@ import gradio as gr
7
  import sentencepiece
8
 
9
  title = "Welcome to Tonic's 🐋🐳Orca-2-13B!"
10
- description = "You can use [🐋🐳microsoft/Orca-2-13b](https://huggingface.co/microsoft/Orca-2-13b) via API using Gradio by scrolling down and clicking Use 'Via API' or privately by [cloning this space on huggingface](https://huggingface.co/spaces/Tonic1/TonicsOrca2?duplicate=true) . [Join me on Discord to build together](https://discord.gg/VqTxc76K3u). Big thanks to the HuggingFace Organisation for the Community Grant."
11
 
12
  # os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:50'
13
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -45,8 +45,10 @@ class OrcaChatBot:
45
  temperature=temperature,
46
  top_p=top_p,
47
  repetition_penalty=repetition_penalty,
48
- pad_token_id=self.tokenizer.eos_token_id
 
49
  )
 
50
 
51
  # Decode the generated response
52
  response = self.tokenizer.decode(output_ids[0], skip_special_tokens=True)
 
7
  import sentencepiece
8
 
9
  title = "Welcome to Tonic's 🐋🐳Orca-2-13B!"
10
+ description = "You can use [🐋🐳microsoft/Orca-2-13b](https://huggingface.co/microsoft/Orca-2-13b) via API using Gradio by scrolling down and clicking Use 'Via API' or privately by [cloning this space on huggingface](https://huggingface.co/spaces/Tonic1/TonicsOrca2?duplicate=true) . [Join my active builders' server on discord](https://discord.gg/VqTxc76K3u). Big thanks to the HuggingFace Organisation for the Community Grant."
11
 
12
  # os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:50'
13
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
45
  temperature=temperature,
46
  top_p=top_p,
47
  repetition_penalty=repetition_penalty,
48
+ pad_token_id=self.tokenizer.eos_token_id,
49
+ do_sample=True # Enable sampling-based generation
50
  )
51
+
52
 
53
  # Decode the generated response
54
  response = self.tokenizer.decode(output_ids[0], skip_special_tokens=True)