// ragrqs/static/javascript/rag_payload.js
/** @format */
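
// Each builder below returns a Mistral-style chat-completions payload.
// "model" is intentionally left empty; the caller fills it in before sending.

// Payload presumably used for per-document requests: small output budget
// (1024 tokens) and low temperature for near-deterministic answers.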
function getPayloadDoc(prompt) {
  return {
    model: "",
    messages: [{ role: "user", content: prompt }],
    temperature: 0.3,
    max_tokens: 1024,
    stream: false,
    safe_prompt: false,
    random_seed: 42,
  };
}
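
// Payload for building the RAG context: same low temperature, larger
// output budget (2000 tokens).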
function getPayloadBuildContext(prompt) {
  return {
    model: "",
    messages: [{ role: "user", content: prompt }],
    temperature: 0.3,
    max_tokens: 2000,
    stream: false,
    safe_prompt: false,
    random_seed: 42,
  };
}
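
// Payload for answering a prompt that already embeds the retrieved context;
// identical to the builder above apart from its role in the pipeline.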
function getPayloadWithContext(prompt) {
  return {
    model: "",
    messages: [{ role: "user", content: prompt }],
    temperature: 0.3,
    max_tokens: 2000,
    stream: false,
    safe_prompt: false,
    random_seed: 42,
  };
}
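
// Payload for conversational thread turns: higher temperature (0.7)
// allows more varied replies.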
function getPayloadThread(prompt) {
  return {
    model: "",
    messages: [{ role: "user", content: prompt }],
    temperature: 0.7,
    max_tokens: 2000,
    stream: false,
    safe_prompt: false,
    random_seed: 42,
  };
}
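
// --- Usage sketch (illustrative, not part of the original file) ---
// The fields above (random_seed, safe_prompt) match the Mistral
// chat-completions request shape. A minimal sender under that assumption;
// the endpoint, model name, and apiKey handling are assumptions, not
// something this file defines.
async function sendPayload(payload, model, apiKey) {
  payload.model = model; // builders leave "model" empty for the caller to set
  const response = await fetch("https://api.mistral.ai/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`,
    },
    body: JSON.stringify(payload),
  });
  if (!response.ok) throw new Error(`HTTP error ${response.status}`);
  const data = await response.json();
  return data.choices[0].message.content;
}

// Example call (model alias and API_KEY are placeholders):
// sendPayload(getPayloadDoc("Summarize this document ..."), "mistral-small-latest", API_KEY)
//   .then(console.log);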
/* Legacy builders targeting the Hugging Face Inference API payload shape
   (inputs / parameters / options), kept commented out for reference:
function getPayloadDoc(prompt) {
  const payload = {
    inputs: prompt,
    parameters: {
      task: "text2text-generation",
      max_new_tokens: 2000,
      num_return_sequences: 1,
      temperature: 0.4,
      top_p: 0.85,
      top_k: 30,
      do_sample: false,
      no_repeat_ngram_size: 3,
      num_beams: 4,
      repetition_penalty: 1.2,
      return_full_text: false,
      details: false,
      max_time: 90.0,
      seed: 42,
    },
    options: {
      use_cache: false,
      wait_for_model: true,
    },
  };
  return payload;
}
function getPayloadBuildContext(prompt) {
  const payload = {
    inputs: prompt,
    parameters: {
      task: "text2text-generation",
      max_new_tokens: 6000,
      num_return_sequences: 1,
      temperature: 0.7,
      top_p: 0.85,
      top_k: 30,
      do_sample: false,
      no_repeat_ngram_size: 4,
      num_beams: 6,
      repetition_penalty: 1.2,
      return_full_text: false,
      details: false,
      max_time: 180.0,
      seed: 42,
    },
    options: {
      use_cache: false,
      wait_for_model: true,
    },
  };
  return payload;
}
function getPayloadWithContext(prompt) {
  const payload = {
    inputs: prompt,
    parameters: {
      task: "text2text-generation",
      max_new_tokens: 4000,
      num_return_sequences: 1,
      temperature: 0.7,
      top_p: 0.85,
      top_k: 30,
      do_sample: false,
      no_repeat_ngram_size: 4,
      num_beams: 5,
      repetition_penalty: 1.4,
      return_full_text: false,
      details: false,
      max_time: 120.0,
      seed: 42,
    },
    options: {
      use_cache: false,
      wait_for_model: true,
    },
  };
  return payload;
}
function getPayloadThread(prompt) {
  const payload = {
    inputs: prompt,
    parameters: {
      task: "text2text-generation",
      max_new_tokens: 6048,
      num_return_sequences: 1,
      temperature: 0.7,
      top_p: 0.85,
      top_k: 30,
      do_sample: false,
      no_repeat_ngram_size: 4,
      num_beams: 5,
      repetition_penalty: 1.4,
      return_full_text: false,
      details: false,
      max_time: 120.0,
      seed: 42,
    },
    options: {
      use_cache: false,
      wait_for_model: true,
    },
  };
  return payload;
}
*/