Upload prompts.py
prompts.py
ADDED
@@ -0,0 +1,109 @@
"""Prompts for the chatbot and evaluation."""
import json
import logging
import pathlib
from typing import Optional, Union

from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)

logger = logging.getLogger(__name__)


def load_chat_prompt(f_name: Optional[Union[pathlib.Path, str]] = None) -> ChatPromptTemplate:
    """Load the chatbot prompt from a JSON file, or fall back to the built-in default template."""
    if isinstance(f_name, str) and f_name:
        f_name = pathlib.Path(f_name)
    if f_name and f_name.is_file():
        with f_name.open("r") as prompt_file:
            template = json.load(prompt_file)
    else:
        logger.warning(
            f"No chat prompt provided. Using default chat prompt from {__name__}"
        )
        template = {
            "system_template": "You are wandbot, an AI assistant designed to provide accurate and helpful responses "
            "to questions related to Weights & Biases and its python SDK, wandb.\nYour goal is to "
            "always provide conversational answers based solely on the context information "
            "provided by the user and not rely on prior knowledge.\nWhen possible, provide code "
            "blocks and HTTP links directly from the official documentation at "
            "https://docs.wandb.ai, but ensure that they are relevant and not fabricated.\n\nIf "
            "you are unable to answer a question or generate valid code or links based on the "
            "context provided, respond with 'Hmm, I'm not sure' and direct the user to post the "
            "question on the community forums at https://community.wandb.ai/ or reach out to wandb "
            "support via support@wandb.ai.\n\nYou can only answer questions related to wandb and "
            "Weights & Biases.\nIf a question is not related, politely inform the user and offer "
            "to assist with any wandb-related questions they may have.\n\nIf necessary, "
            "ask follow-up questions to clarify the context and provide a more accurate "
            "answer.\n\nThank the user for their question and offer additional assistance if "
            "needed.\nALWAYS prioritize accuracy and helpfulness in your responses and ALWAYS "
            "return a 'SOURCES' part in your answer.\n\nHere is an example "
            "conversation:\n\nCONTEXT\nContent: Weights & Biases supports logging audio data "
            "arrays or file that can be played back in W&B. You can log audio with `wandb.Audio("
            ")`\nSource: 28-pl\nContent: # Log an audio array or file\nwandb.log({{'my whale "
            "song': wandb.Audio(\n array_or_path, caption='montery whale 0034', "
            "sample_rate=32)}})\n\n# OR\n\n# Log your audio as part of a W&B Table\nmy_table = "
            "wandb.Table(columns=['audio', 'spectrogram', 'bird_class', 'prediction'])\nfor ("
            "audio_arr, spec, label) in my_data:\n pred = model(audio)\n\n # Add the "
            "data to a W&B Table\n audio = wandb.Audio(audio_arr, sample_rate=32)\n "
            "img = wandb.Image(spec)\n my_table.add_data(audio, img, label, pred)\n\n# Log "
            "the Table to wandb\n wandb.log({{'validation_samples' : my_table}})'\nSource: "
            "30-pl\n================\nQuestion: Hi, @wandbot: How can I log audio with "
            "wandb?\n================\nFinal Answer in Markdown: Here is an example of how to log "
            "audio with wandb:\n\n```\nimport wandb\n\n# Create an instance of the "
            "wandb.data_types.Audio class\naudio = wandb.data_types.Audio("
            "data_or_path='path/to/audio.wav', sample_rate=44100, caption='My audio clip')\n\n# "
            "Get information about the audio clip\ndurations = audio.durations()\nsample_rates = "
            "audio.sample_rates()\n\n# Log the audio clip\nwandb.log({{'audio': "
            "audio}})\n```\nSources: 28-pl, 30-pl\n\nCONTEXT\n================\nContent: "
            "ExtensionArray.repeat(repeats, axis=None) Returns a new ExtensionArray where each "
            "element of the current ExtensionArray is repeated consecutively a given number of "
            "times.\n\nParameters: repeats int or array of ints. The number of repetitions for "
            "each element. This should be a positive integer. Repeating 0 times will return an "
            "empty array. axis (0 or ‘index’, 1 or ‘columns’), default 0 The axis along which to "
            "repeat values. Currently only axis=0 is supported.\nSource: "
            "0-pl\n================\nQuestion: How to eat vegetables using "
            "pandas?\n================\nFinal Answer in Markdown: Hmm, The question does not seem "
            "to be related to wandb. As a documentation bot for wandb I can only answer questions "
            "related to wandb. Please try again with a question related to "
            "wandb.\nSources:\n\nBEGIN\n================\nCONTEXT\n{"
            "summaries}\n================\nGiven the context information and not prior knowledge, "
            "answer the question.\n================\n",
            "human_template": "{question}\n================\nFinal Answer in Markdown:",
        }

    messages = [
        SystemMessagePromptTemplate.from_template(template["system_template"]),
        HumanMessagePromptTemplate.from_template(template["human_template"]),
    ]
    prompt = ChatPromptTemplate.from_messages(messages)
    return prompt
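
# Usage sketch (an illustrative addition, not part of the uploaded file): the
# default templates above expose `summaries` (the retrieved context) and
# `question` as input variables, so the prompt can be rendered roughly like
# this, assuming langchain is installed:
#
#     chat_prompt = load_chat_prompt()
#     messages = chat_prompt.format_prompt(
#         summaries="Content: ...\nSource: 28-pl",
#         question="How do I log audio with wandb?",
#     ).to_messages()
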


def load_eval_prompt(f_name: Optional[Union[pathlib.Path, str]] = None) -> ChatPromptTemplate:
    """Load the evaluation prompt, reading the human template from a file when one is given."""
    if isinstance(f_name, str) and f_name:
        f_name = pathlib.Path(f_name)
    if f_name and f_name.is_file():
        human_template = f_name.read_text()
    else:
        logger.warning(
            f"No human prompt provided. Using default human prompt from {__name__}"
        )

        human_template = """\nQUESTION: {query}\nCHATBOT ANSWER: {result}\n
        ORIGINAL ANSWER: {answer} GRADE:"""

    system_message_prompt = SystemMessagePromptTemplate.from_template(
        """You are an evaluator for the W&B chatbot. You are given a question, the chatbot's answer, and the original answer,
        and are asked to score the chatbot's answer as either CORRECT or INCORRECT. Note
        that sometimes, the original answer is not the best answer, and sometimes the chatbot's answer is not the
        best answer. You are evaluating the chatbot's answer only. Example Format:\nQUESTION: question here\nCHATBOT
        ANSWER: student's answer here\nORIGINAL ANSWER: original answer here\nGRADE: CORRECT or INCORRECT here\nPlease
        remember to grade them based on being factually accurate. Begin!"""
    )
    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
    chat_prompt = ChatPromptTemplate.from_messages(
        [system_message_prompt, human_message_prompt]
    )
    return chat_prompt
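

if __name__ == "__main__":
    # Smoke-test sketch (an illustrative addition, not part of the uploaded file):
    # build both default prompts and render the evaluation prompt with placeholder
    # values. Assumes langchain is installed; the placeholder strings are invented.
    logging.basicConfig(level=logging.INFO)

    chat_prompt = load_chat_prompt()
    print("chat prompt input variables:", chat_prompt.input_variables)

    eval_prompt = load_eval_prompt()
    print("eval prompt input variables:", eval_prompt.input_variables)

    rendered = eval_prompt.format_prompt(
        query="How do I log audio with wandb?",
        result="Use wandb.Audio inside wandb.log.",
        answer="Log audio by passing a wandb.Audio object to wandb.log.",
    ).to_messages()
    print(rendered)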