Felladrin committed on
Commit 042a761 · verified · 1 Parent(s): b2c2948

Update app.py

Files changed (1): app.py (+18, -6)
app.py CHANGED
@@ -20,21 +20,33 @@ def generate(
     prompt = pipe.tokenizer.apply_chat_template(message_template, tokenize=False, add_generation_prompt=True)

     if model_name == "Felladrin/Pythia-31M-Chat-v1":
-        outputs = pipe(prompt, max_new_tokens=250, use_cache=True, penalty_alpha=0.5, top_k=2, repetition_penalty=1.0016)
+        outputs = pipe(prompt, max_new_tokens=512, use_cache=True, penalty_alpha=0.5, top_k=2, repetition_penalty=1.0016)
     elif model_name == "Felladrin/Llama-68M-Chat-v1":
-        outputs = pipe(prompt, max_new_tokens=250, use_cache=True, penalty_alpha=0.5, top_k=4, repetition_penalty=1.043)
+        outputs = pipe(prompt, max_new_tokens=512, use_cache=True, penalty_alpha=0.5, top_k=4, repetition_penalty=1.043)
     elif model_name == "Felladrin/Smol-Llama-101M-Chat-v1":
-        outputs = pipe(prompt, max_new_tokens=250, use_cache=True, penalty_alpha=0.5, top_k=4, repetition_penalty=1.105)
+        outputs = pipe(prompt, max_new_tokens=512, use_cache=True, penalty_alpha=0.5, top_k=4, repetition_penalty=1.105)
     elif model_name == "Felladrin/Llama-160M-Chat-v1":
-        outputs = pipe(prompt, max_new_tokens=250, use_cache=True, penalty_alpha=0.5, top_k=4, repetition_penalty=1.01)
+        outputs = pipe(prompt, max_new_tokens=512, use_cache=True, penalty_alpha=0.5, top_k=4, repetition_penalty=1.01)
     elif model_name == "Felladrin/TinyMistral-248M-Chat-v4":
-        outputs = pipe(prompt, max_new_tokens=250, use_cache=True, penalty_alpha=0.5, top_k=5, repetition_penalty=1.0)
+        outputs = pipe(prompt, max_new_tokens=512, use_cache=True, penalty_alpha=0.5, top_k=5, repetition_penalty=1.0)
+    elif "Felladrin/Minueza-2-96M-Instruct-Variant" in model_name:
+        outputs = pipe(prompt, max_new_tokens=512, use_cache=True, do_sample=True, temperature=0.7, top_p=0.9, top_k=0, min_p=0.1, repetition_penalty=1.17)
     else:
-        outputs = pipe(prompt, max_new_tokens=250, do_sample=True, temperature=0.65, top_k=35, top_p=0.55, repetition_penalty=1.176)
+        outputs = pipe(prompt, max_new_tokens=512, do_sample=True, temperature=0.65, top_k=35, top_p=0.55, repetition_penalty=1.176)

     return outputs[0]["generated_text"]

 model_choices = [
+    "Felladrin/Minueza-2-96M-Instruct-Variant-10",
+    "Felladrin/Minueza-2-96M-Instruct-Variant-09",
+    "Felladrin/Minueza-2-96M-Instruct-Variant-08",
+    "Felladrin/Minueza-2-96M-Instruct-Variant-07",
+    "Felladrin/Minueza-2-96M-Instruct-Variant-06",
+    "Felladrin/Minueza-2-96M-Instruct-Variant-05",
+    "Felladrin/Minueza-2-96M-Instruct-Variant-04",
+    "Felladrin/Minueza-2-96M-Instruct-Variant-03",
+    "Felladrin/Minueza-2-96M-Instruct-Variant-02",
+    "Felladrin/Minueza-2-96M-Instruct-Variant-01",
     "Felladrin/Llama-160M-Chat-v1",
     "Felladrin/TinyMistral-248M-Chat-v4",
     "Felladrin/Llama-68M-Chat-v1",