lvwerra HF staff pcuenq HF staff committed on
Commit
6e419a7
1 Parent(s): 57659db

Infilling update (untested) (#1)

Browse files

- Infilling update (untested) (b00b30b03d4f1b8740b97ff52aa4059fb063a042)


Co-authored-by: Pedro Cuenca <pcuenq@users.noreply.huggingface.co>

Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -13,13 +13,14 @@ HF_TOKEN = os.environ.get("HF_TOKEN", None)
13
 
14
  API_URL = "https://api-inference.huggingface.co/models/codellama/CodeLlama-7b-hf"
15
 
16
- FIM_PREFIX = "<fim_prefix>"
17
- FIM_MIDDLE = "<fim_middle>"
18
- FIM_SUFFIX = "<fim_suffix>"
19
 
20
  FIM_INDICATOR = "<FILL_HERE>"
21
 
22
  EOS_STRING = "</s>"
 
23
 
24
  theme = gr.themes.Monochrome(
25
  primary_hue="indigo",
@@ -78,7 +79,7 @@ def generate(
78
 
79
  previous_token = ""
80
  for response in stream:
81
- if response.token.text == EOS_STRING:
82
  if fim_mode:
83
  output += suffix
84
  else:
 
13
 
14
  API_URL = "https://api-inference.huggingface.co/models/codellama/CodeLlama-7b-hf"
15
 
16
+ FIM_PREFIX = "<PRE> "
17
+ FIM_MIDDLE = " <MID>"
18
+ FIM_SUFFIX = "<SUF>"
19
 
20
  FIM_INDICATOR = "<FILL_HERE>"
21
 
22
  EOS_STRING = "</s>"
23
+ EOT_STRING = "<EOT>"
24
 
25
  theme = gr.themes.Monochrome(
26
  primary_hue="indigo",
 
79
 
80
  previous_token = ""
81
  for response in stream:
82
+ if response.token.text in [EOS_STRING, EOT_STRING]:
83
  if fim_mode:
84
  output += suffix
85
  else: