mkthoma committed on
Commit
6ea445d
1 Parent(s): cc541f7

app update

Browse files
Files changed (1) hide show
  1. app.py +13 -19
app.py CHANGED
@@ -6,28 +6,22 @@ model_path = "finetuned_phi2"
6
# Load the fine-tuned Phi-2 checkpoint and its matching tokenizer from the
# local directory named by model_path. trust_remote_code=True lets the
# checkpoint's own modeling code execute — required for some custom
# architectures, and only safe with trusted checkpoints.
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 
9
- # gen = pipeline('text-generation', model=model, tokenizer=tokenizer)
10
- # result = gen(prompt)
11
- # print(result[0]['generated_text'])
12
 
13
 
14
def generate(prompt, history, temperature=0.3, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0,):
    """Generate a text completion for *prompt* with sampling controls.

    Args:
        prompt: the full text prompt to complete.
        history: chat history supplied by the Gradio chat callback (unused here).
        temperature: sampling temperature; clamped to a minimum of 1e-2
            because the pipeline rejects a temperature of 0.
        max_new_tokens: maximum number of tokens to generate beyond the prompt.
        top_p: nucleus-sampling probability mass.
        repetition_penalty: penalty applied to already-generated tokens.

    Returns:
        The generated text, including the echoed prompt, as a single string.
    """
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2  # pipeline rejects temperature == 0
    top_p = float(top_p)

    generate_kwargs = dict(temperature=temperature,
                           max_new_tokens=max_new_tokens,
                           top_p=top_p,
                           repetition_penalty=repetition_penalty,
                           do_sample=True)

    gen = pipeline('text-generation', model=model, tokenizer=tokenizer)
    # BUG FIX: generate_kwargs was previously built but never passed, so the
    # temperature/top_p/repetition_penalty arguments silently had no effect.
    # NOTE(review): the original dict also carried seed=42, which the
    # transformers pipeline does not accept as a generation kwarg; call
    # transformers.set_seed(42) at module level if reproducibility is needed.
    output = gen(prompt, **generate_kwargs)
    return output[0]['generated_text']
31
 
32
# Chat display widget: custom user/bot avatar images, compact message bubbles,
# no label, a copy button on each message, and like/dislike feedback enabled.
bbchatbot = gr.Chatbot(
    avatar_images=["logo/user logo.png", "logo/bot logo.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
 
6
# Load the fine-tuned Phi-2 checkpoint and its matching tokenizer from the
# local directory named by model_path. trust_remote_code=True lets the
# checkpoint's own modeling code execute — required for some custom
# architectures, and only safe with trusted checkpoints.
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
8
 
 
 
 
9
 
10
 
11
def generate(question, history=None):
    """Answer *question* with the fine-tuned model.

    Wraps the question in a Llama-style [INST]/<<SYS>> prompt, generates up to
    500 new tokens, and returns only the newly generated text (the echoed
    prompt is stripped from the pipeline output).

    Args:
        question: the user's question as a plain string.
        history: optional chat history; accepted for backward compatibility
            with chat callbacks that pass it positionally (unused).

    Returns:
        The model's answer with the prompt text removed.
    """
    system_message = "You are a question answering chatbot. Provide a clear and detailed explanation"
    prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n {question} [/INST]"  # replace the command here with something relevant to your task

    num_new_tokens = 500  # change to the number of new tokens you want to generate
    # max_length counts the prompt tokens too, so extend it by the prompt
    # length to actually allow num_new_tokens of generated text.
    num_prompt_tokens = len(tokenizer(prompt)['input_ids'])
    max_length = num_prompt_tokens + num_new_tokens

    # PERF FIX: build the text-generation pipeline once and reuse it across
    # calls — re-instantiating it per request was the dominant cost here.
    # max_length varies with the prompt, so it is passed at call time instead
    # of being baked into the pipeline.
    if not hasattr(generate, "_gen"):
        generate._gen = pipeline('text-generation', model=model, tokenizer=tokenizer)
    result = generate._gen(prompt, max_length=max_length)
    return (result[0]['generated_text'].replace(prompt, ''))
24
 
 
 
 
 
25
 
26
# Chat display widget: custom user/bot avatar images, compact message bubbles,
# no label, a copy button on each message, and like/dislike feedback enabled.
bbchatbot = gr.Chatbot(
    avatar_images=["logo/user logo.png", "logo/bot logo.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)