Tonic committed
Commit 8ec916f · verified · 1 Parent(s): 831981c

Update app.py

Files changed (1)
  1. app.py +9 -3
app.py CHANGED
@@ -2,6 +2,8 @@ import spaces
 import gradio as gr
 from transformers import AutoTokenizer, LlamaForCausalLM
 import os
+import fastchat
+from fastchat.conversation import get_conv_template
 
 tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", use_auth_token=os.getenv("HUGGINGFACE_TOKEN"))
 model = LlamaForCausalLM.from_pretrained("kaist-ai/Prometheus-13b-v1.0", device_map="auto", load_in_8bit=True)
@@ -14,14 +16,18 @@ An instruction (might include an Input inside it), a response to evaluate, a ref
 2. After writing a feedback, write a score that is an integer between 1 and 5. You should refer to the score rubric.
 3. The output format should look as follows: \"Feedback: (write a feedback for criteria) [RESULT] (an integer number between 1 and 5)\"
 4. Please do not generate any other opening, closing, and explanations.
+
 ###The instruction to evaluate:
 {instruction_to_evaluate}
+
 ###Response to evaluate:
 {response_to_evaluate}
+
 ###Reference Answer (Score 5):
 {reference_answer}
+
 ###Score Rubrics:
-[{criteria_description}]
+{criteria_description}
 Score 1: {score1_description}
 Score 2: {score2_description}
 Score 3: {score3_description}
@@ -31,7 +37,7 @@ Score 5: {score5_description}
 
 input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
 
-outputs = model.generate(input_ids)
+outputs = model.generate(input_ids, sample=True, temperature=1.0, top_p=0.9, max_new_tokens=650, repetition_penalty=1.03)
 result = tokenizer.decode(outputs[0])
 
 return result
@@ -49,7 +55,7 @@ iface = gr.Interface(
 gr.Textbox(label="Score 4 Description", placeholder="Enter Score 4 Description Here...", lines=2),
 gr.Textbox(label="Score 5 Description", placeholder="Enter Score 5 Description Here...", lines=2)
 ],
-outputs="text",
+outputs="🎏KAIST-AI/⚖️Prometheus",
 title="Welcome to🌟Tonic's⚖️Prometheus",
 description="[🎏KAIST-AI/⚖️Prometheus](https://huggingface.co/kaist-ai/prometheus-13b-v1.0) Prometheus is an alternative of GPT-4 evaluation when doing fine-grained evaluation of an underlying LLM & a Reward model for Reinforcement Learning from Human Feedback (RLHF). You can use this demo to try out their model ! You can also use [🎏KAIST-AI/⚖️Prometheus](https://huggingface.co/kaist-ai/prometheus-13b-v1.0) [by cloning this space](https://huggingface.co/spaces/Tonic/prometheus/tree/main?clone=true). [🧬🔬🔍 Simply click here: 🤗](https://huggingface.co/spaces/Tonic/prometheus?duplicate=true) Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to 🌟 [DataTonic](https://github.com/Tonic-AI/DataTonic) 🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗",
 examples=[
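A few notes on the change, with hedged sketches rather than drop-in patches.

The unchanged loader lines interact with the new code: the tokenizer comes from the gated meta-llama/Llama-2-7b-chat-hf repo while the weights come from kaist-ai/Prometheus-13b-v1.0. Prometheus is fine-tuned from Llama-2, so the vocabularies match, but pulling the tokenizer from the Prometheus repo (assuming it ships tokenizer files, as fine-tunes usually do) would avoid the gated download; recent transformers releases also deprecate `use_auth_token` in favor of `token`.

```python
import os

from transformers import AutoTokenizer, LlamaForCausalLM

# Sketch, not the committed code: load tokenizer and weights from one repo
# (assumes kaist-ai/Prometheus-13b-v1.0 ships its tokenizer files) and pass
# the newer `token` argument instead of the deprecated `use_auth_token`.
model_id = "kaist-ai/Prometheus-13b-v1.0"
tokenizer = AutoTokenizer.from_pretrained(model_id, token=os.getenv("HUGGINGFACE_TOKEN"))
model = LlamaForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    load_in_8bit=True,  # 8-bit loading requires the bitsandbytes package
)
```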
 
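The new fastchat imports are not called anywhere in the hunks above, so they appear to stage a later change. Prometheus's model card wraps the grading prompt in FastChat's Llama-2 conversation template before tokenizing; a sketch of that usual pattern, assuming this is the intent (`input_text` is the filled-in rubric template from app.py):

```python
from fastchat.conversation import get_conv_template

# Assumed intent (the commit only adds the imports): wrap the filled-in
# rubric prompt in the Llama-2 chat template that Prometheus's base expects.
conv = get_conv_template("llama-2")
conv.set_system_message("You are a fair evaluator language model.")
conv.append_message(conv.roles[0], input_text)  # user turn: the rubric prompt
conv.append_message(conv.roles[1], None)        # leave the assistant turn open
prompt = conv.get_prompt()
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to("cuda")
```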
 
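On the new `generate` call: transformers spells the sampling switch `do_sample`, not `sample`. Depending on the library version, the unrecognized `sample` kwarg is either rejected with a ValueError or silently dropped, in which case decoding stays greedy and `temperature`/`top_p` have no effect. A sketch of the presumably intended call:

```python
# Presumably intended call; the committed line passes sample=True, but the
# documented transformers flag is do_sample.
outputs = model.generate(
    input_ids,
    do_sample=True,  # enable sampling so temperature/top_p take effect
    temperature=1.0,
    top_p=0.9,
    max_new_tokens=650,
    repetition_penalty=1.03,
)
result = tokenizer.decode(outputs[0])
```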
 
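The `outputs=` change looks accidental: `gr.Interface` resolves `outputs` to a component, so the value must be a component instance or a component shortcut string such as "text"; the arbitrary label "🎏KAIST-AI/⚖️Prometheus" should make the interface fail to build. If the goal was a labeled output box, a minimal self-contained sketch (the `echo` handler stands in for the app's real evaluation function):

```python
import gradio as gr

def echo(text: str) -> str:
    # Stand-in for the app's real evaluation function.
    return text

iface = gr.Interface(
    fn=echo,
    inputs=gr.Textbox(label="Instruction"),
    # A component instance carries a label; a bare string must name a
    # component type ("text"), not an arbitrary title.
    outputs=gr.Textbox(label="🎏KAIST-AI/⚖️Prometheus"),
)

if __name__ == "__main__":
    iface.launch()
```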
 