llm-human-feedback-collector-chat-interface-dpo / chat_interface_preference.py
davidberenstein1957's picture
Update LLM preference collector
17aeee6
raw
history blame
42.2 kB
"""
This file defines a useful high-level abstraction to build Gradio chatbots: ChatInterface.
"""
from __future__ import annotations
import datetime
import functools
import inspect
import json
import random
import re
import uuid
from typing import AsyncGenerator, Callable, List, Literal, Union, cast
import anyio
from gradio.blocks import Blocks
from gradio.components import (
Button,
Chatbot,
Component,
Markdown,
MultimodalTextbox,
State,
Textbox,
get_component_instance,
)
from gradio.events import Dependency, on
from gradio.helpers import Error, Info, special_args
from gradio.helpers import create_examples as Examples # noqa: N812
from gradio.layouts import Accordion, Group, Row
from gradio.routes import Request
from gradio.themes import ThemeClass as Theme
from gradio.utils import SyncToAsyncIterator, async_iteration, async_lambda
from gradio_client.documentation import document
from huggingface_hub import CommitScheduler
pattern = re.compile(r'<div class="message-identifier">(.*?)</div>', re.DOTALL)
PREFERENCE_TECHNIQUE_MAPPING = {"sft": "prompt", "dpo": "preference", "kto": "vibes"}
@document()
class ChatInterface(Blocks):
"""
ChatInterface is Gradio's high-level abstraction for creating chatbot UIs, and allows you to create
a web-based demo around a chatbot model in a few lines of code. Only one parameter is required: fn, which
takes a function that governs the response of the chatbot based on the user input and chat history. Additional
parameters can be used to control the appearance and behavior of the demo.
Example:
import gradio as gr
def echo(message, history):
return message
demo = gr.ChatInterface(fn=echo, examples=["hello", "hola", "merhaba"], title="Echo Bot")
demo.launch()
Demos: chatinterface_multimodal, chatinterface_random_response, chatinterface_streaming_echo
Guides: creating-a-chatbot-fast, sharing-your-app
"""
def __init__(
self,
fn: Callable,
*,
prefence_techniques: str | List[str] | None = None,
min_turns: int = 1,
max_turns: int = 1,
repo_id: None | str,
repo_private: bool = False,
multimodal: bool = False,
chatbot: Chatbot | None = None,
textbox: Textbox | MultimodalTextbox | None = None,
additional_inputs: str | Component | list[str | Component] | None = None,
additional_inputs_accordion_name: str | None = None,
additional_inputs_accordion: str | Accordion | None = None,
examples: list[str] | list[dict[str, str | list]] | list[list] | None = None,
cache_examples: bool | Literal["lazy"] | None = None,
examples_per_page: int = 10,
title: str | None = None,
description: str | None = None,
theme: Theme | str | None = None,
css: str | None = None,
js: str | None = None,
head: str | None = None,
analytics_enabled: bool | None = None,
autofocus: bool = True,
concurrency_limit: int | None | Literal["default"] = "default",
fill_height: bool = True,
delete_cache: tuple[int, int] | None = None,
):
"""
Parameters:
fn: The function to wrap the chat interface around. Should accept two parameters: a string input message and list of two-element lists of the form [[user_message, bot_message], ...] representing the chat history, and return a string response. See the Chatbot documentation for more information on the chat history format.
multimodal: If True, the chat interface will use a gr.MultimodalTextbox component for the input, which allows for the uploading of multimedia files. If False, the chat interface will use a gr.Textbox component for the input.
chatbot: An instance of the gr.Chatbot component to use for the chat interface, if you would like to customize the chatbot properties. If not provided, a default gr.Chatbot component will be created.
textbox: An instance of the gr.Textbox or gr.MultimodalTextbox component to use for the chat interface, if you would like to customize the textbox properties. If not provided, a default gr.Textbox or gr.MultimodalTextbox component will be created.
additional_inputs: An instance or list of instances of gradio components (or their string shortcuts) to use as additional inputs to the chatbot. If components are not already rendered in a surrounding Blocks, then the components will be displayed under the chatbot, in an accordion.
additional_inputs_accordion_name: Deprecated. Will be removed in a future version of Gradio. Use the `additional_inputs_accordion` parameter instead.
additional_inputs_accordion: If a string is provided, this is the label of the `gr.Accordion` to use to contain additional inputs. A `gr.Accordion` object can be provided as well to configure other properties of the container holding the additional inputs. Defaults to a `gr.Accordion(label="Additional Inputs", open=False)`. This parameter is only used if `additional_inputs` is provided.
examples: Sample inputs for the function; if provided, appear below the chatbot and can be clicked to populate the chatbot input. Should be a list of strings if `multimodal` is False, and a list of dictionaries (with keys `text` and `files`) if `multimodal` is True.
cache_examples: If True, caches examples in the server for fast runtime in examples. The default option in HuggingFace Spaces is True. The default option elsewhere is False.
examples_per_page: If examples are provided, how many to display per page.
title: a title for the interface; if provided, appears above chatbot in large font. Also used as the tab title when opened in a browser window.
description: a description for the interface; if provided, appears above the chatbot and beneath the title in regular font. Accepts Markdown and HTML content.
theme: Theme to use, loaded from gradio.themes.
css: Custom css as a string or path to a css file. This css will be included in the demo webpage.
js: Custom js as a string or path to a js file. The custom js should be in the form of a single js function. This function will automatically be executed when the page loads. For more flexibility, use the head parameter to insert js inside <script> tags.
head: Custom html to insert into the head of the demo webpage. This can be used to add custom meta tags, multiple scripts, stylesheets, etc. to the page.
analytics_enabled: Whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable if defined, or default to True.
submit_btn: Text to display on the submit button. If None, no button will be displayed. If a Button object, that button will be used.
stop_btn: Text to display on the stop button, which replaces the submit_btn when the submit_btn or retry_btn is clicked and response is streaming. Clicking on the stop_btn will halt the chatbot response. If set to None, stop button functionality does not appear in the chatbot. If a Button object, that button will be used as the stop button.
retry_btn: Text to display on the retry button. If None, no button will be displayed. If a Button object, that button will be used.
undo_btn: Text to display on the delete last button. If None, no button will be displayed. If a Button object, that button will be used.
clear_btn: Text to display on the clear button. If None, no button will be displayed. If a Button object, that button will be used.
autofocus: If True, autofocuses to the textbox when the page loads.
concurrency_limit: If set, this is the maximum number of chatbot submissions that can be running simultaneously. Can be set to None to mean no limit (any number of chatbot submissions can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `.queue()`, which is 1 by default).
fill_height: If True, the chat interface will expand to the height of window.
delete_cache: A tuple corresponding [frequency, age] both expressed in number of seconds. Every `frequency` seconds, the temporary files created by this Blocks instance will be deleted if more than `age` seconds have passed since the file was created. For example, setting this to (86400, 86400) will delete temporary files every day. The cache will be deleted entirely when the server restarts. If None, no cache deletion will occur.
"""
if max_turns < min_turns:
raise ValueError("`max_turns` should be larger than `min_turns`")
if any([turn for turn in [max_turns, min_turns] if turn < 1]):
raise ValueError("`max_turns` should be larger than `min_turns`")
self.max_turns = max_turns
self.min_turns = min_turns
if isinstance(prefence_techniques, str):
prefence_techniques = [prefence_techniques]
elif prefence_techniques is None:
prefence_techniques = ["sft"]
self.prefence_techniques = [technique.lower() for technique in prefence_techniques]
optional_techniques = ["kto", "sft", "spin", "dpo", "simpo", "rlhf", "orpo"]
if any([technique for technique in self.prefence_techniques if technique not in optional_techniques]):
raise ValueError(f"Supported techniques are {optional_techniques}")
submit_btn_one = "Generate"
submit_btn_two = None
submit_btn_a = None
submit_btn_b = None
submit_btn_ab = None
submit_btn_good = None
submit_btn_bad = None
stop_btn = "Stop"
undo_btn = "↩️ Undo"
clear_btn = "🗑️ Log and clear"
if "kto" in prefence_techniques:
submit_btn_good = "Log response 👍"
submit_btn_bad = "Log response 👎"
if any([technique for technique in ["dpo", "simpo", "rlhf", "orpo"] if technique in self.prefence_techniques]):
submit_btn_two = "Generate 2"
submit_btn_a = "Log preference 🅰️"
submit_btn_b = "Log preference 🅱️"
submit_btn_ab = "Continue random 🅰️=🅱️"
super().__init__(
analytics_enabled=analytics_enabled,
mode="chat_interface",
css=css,
title=title or "Gradio",
theme=theme,
js=js,
head=head,
fill_height=fill_height,
delete_cache=delete_cache,
)
self.css = css
self.multimodal = multimodal
self.concurrency_limit = concurrency_limit
self.fn = fn
self.is_async = inspect.iscoroutinefunction(self.fn) or inspect.isasyncgenfunction(self.fn)
self.is_generator = inspect.isgeneratorfunction(self.fn) or inspect.isasyncgenfunction(self.fn)
self.buttons: list[Button | None] = []
self.examples = examples
self.cache_examples: bool | None | Literal["lazy"] = cache_examples
self._set_conversation_id()
if repo_id:
self.commit_scheduler = CommitScheduler(
repo_id=repo_id, folder_path="feedback", repo_type="dataset", private=repo_private, every=1
)
else:
self.commit_scheduler = None
if self.commit_scheduler:
self.data_file = self.commit_scheduler.folder_path / f"data_{uuid.uuid4()}.json"
if additional_inputs:
if not isinstance(additional_inputs, list):
additional_inputs = [additional_inputs]
self.additional_inputs = [get_component_instance(i) for i in additional_inputs] # type: ignore
else:
self.additional_inputs = []
if additional_inputs_accordion_name is not None:
print(
"The `additional_inputs_accordion_name` parameter is deprecated and will be removed in a future version of Gradio. Use the `additional_inputs_accordion` parameter instead."
)
self.additional_inputs_accordion_params = {"label": additional_inputs_accordion_name}
if additional_inputs_accordion is None:
self.additional_inputs_accordion_params = {
"label": "Additional Inputs",
"open": False,
}
elif isinstance(additional_inputs_accordion, str):
self.additional_inputs_accordion_params = {"label": additional_inputs_accordion}
elif isinstance(additional_inputs_accordion, Accordion):
self.additional_inputs_accordion_params = additional_inputs_accordion.recover_kwargs(
additional_inputs_accordion.get_config()
)
else:
raise ValueError(
f"The `additional_inputs_accordion` parameter must be a string or gr.Accordion, not {type(additional_inputs_accordion)}"
)
with self:
if title:
Markdown(f"<h1 style='text-align: center; margin-bottom: 1rem'>{self.title}</h1>")
if description:
Markdown(description)
if self.commit_scheduler:
Markdown(
f"## Data is being logged to a datset on the hub: [{self.commit_scheduler.repo_id}](https://huggingface.co/datasets/{self.commit_scheduler.repo_id})"
)
Markdown(f"### Techniques: {self.prefence_techniques}")
Markdown(f"### MIN TURNS: {self.min_turns} - MAX TURN: {self.max_turns}")
if chatbot:
self.chatbot = chatbot.render()
else:
self.chatbot = Chatbot(label="Chatbot", scale=1, height=200 if fill_height else None)
with Row():
for btn in [
submit_btn_a,
submit_btn_b,
submit_btn_ab,
submit_btn_good,
submit_btn_bad,
undo_btn,
clear_btn,
]:
if btn is not None:
if isinstance(btn, Button):
btn.render()
elif isinstance(btn, str):
btn = Button(btn, variant="secondary", size="sm", min_width=60)
else:
raise ValueError(
f"All the _btn parameters must be a gr.Button, string, or None, not {type(btn)}"
)
self.buttons.append(btn) # type: ignore
with Group():
with Row():
if textbox:
if self.multimodal:
submit_btn_one = None
submit_btn_two = None
else:
textbox.container = False
textbox.show_label = False
textbox_ = textbox.render()
if not isinstance(textbox_, (Textbox, MultimodalTextbox)):
raise TypeError(
f"Expected a gr.Textbox or gr.MultimodalTextbox component, but got {type(textbox_)}"
)
self.textbox = textbox_
elif self.multimodal:
submit_btn_one = None
submit_btn_two = None
self.textbox = MultimodalTextbox(
show_label=False,
label="Message",
placeholder="Type a message...",
scale=7,
autofocus=autofocus,
)
else:
self.textbox = Textbox(
container=False,
show_label=False,
label="Message",
placeholder="Type a message...",
scale=7,
autofocus=autofocus,
)
submit_buttons = []
for btn in [submit_btn_one, submit_btn_two]:
if btn is not None and not multimodal:
if isinstance(btn, Button):
btn.render()
elif isinstance(btn, str):
btn = Button(
btn,
variant="primary",
scale=1,
min_width=150,
)
else:
raise ValueError(
f"The submit_btn parameter must be a gr.Button, string, or None, not {type(btn)}"
)
submit_buttons.append(btn)
if stop_btn is not None:
if isinstance(stop_btn, Button):
stop_btn.visible = False
stop_btn.render()
elif isinstance(stop_btn, str):
stop_btn = Button(
stop_btn,
variant="stop",
visible=False,
scale=1,
min_width=150,
)
else:
raise ValueError(
f"The stop_btn parameter must be a gr.Button, string, or None, not {type(stop_btn)}"
)
self.buttons.extend(submit_buttons + [stop_btn]) # type: ignore
self.fake_api_btn = Button("Fake API", visible=False)
self.fake_response_textbox = Textbox(label="Response", visible=False)
(
self.submit_btn_a,
self.submit_btn_b,
self.submit_btn_ab,
self.submit_btn_good,
self.submit_btn_bad,
self.undo_btn,
self.clear_btn,
self.submit_btn_one,
self.submit_btn_two,
self.stop_btn,
) = self.buttons
if examples:
if self.is_generator:
examples_fn = self._examples_stream_fn
else:
examples_fn = self._examples_fn
self.examples_handler = Examples(
examples=examples,
inputs=[self.textbox] + self.additional_inputs,
outputs=self.chatbot,
fn=examples_fn,
cache_examples=self.cache_examples,
_defer_caching=True,
examples_per_page=examples_per_page,
)
any_unrendered_inputs = any(not inp.is_rendered for inp in self.additional_inputs)
if self.additional_inputs and any_unrendered_inputs:
with Accordion(**self.additional_inputs_accordion_params): # type: ignore
for input_component in self.additional_inputs:
if not input_component.is_rendered:
input_component.render()
# The example caching must happen after the input components have rendered
if examples:
self.examples_handler._start_caching()
self.saved_input = State()
self.chatbot_state = State(self.chatbot.value) if self.chatbot.value else State([])
self._setup_events()
self._setup_api()
def _set_conversation_id(self):
self._conversation_id = str(uuid.uuid4())
def _save_feedback(self, item):
feedback = {
"timestamp": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"conversation_id": self._conversation_id,
}
feedback.update(item)
if self.commit_scheduler:
with self.commit_scheduler.lock:
with self.data_file.open("a") as f:
f.write(json.dumps(feedback))
def _setup_events(self) -> None:
submit_fn_one = self._stream_fn if self.is_generator else self._submit_fn
submit_triggers_one = (
[self.textbox.submit, self.submit_btn_one.click] if self.submit_btn_one else [self.textbox.submit]
)
submit_tuples = [(submit_fn_one, submit_triggers_one)]
if self.submit_btn_two:
submit_fn_two = functools.partial(submit_fn_one, n_generations=2)
submit_triggers_two = [self.submit_btn_two.click]
submit_tuples.append((submit_fn_two, submit_triggers_two))
for _fn, _triggers in submit_tuples:
submit_event = (
on(
_triggers,
self._clear_and_save_textbox,
[self.textbox],
[self.textbox, self.saved_input],
show_api=False,
queue=False,
)
.then(
self._display_input,
[self.saved_input, self.chatbot_state],
[self.chatbot, self.chatbot_state],
show_api=False,
queue=False,
)
.then(
_fn,
[self.saved_input, self.chatbot_state] + self.additional_inputs,
[self.chatbot, self.chatbot_state],
show_api=False,
concurrency_limit=cast(Union[int, Literal["default"], None], self.concurrency_limit),
)
)
self._setup_stop_events(_triggers, submit_event)
partial_fn_a, partial_fn_b, partial_fn_ab, partial_fn_good, partial_fn_bad = (
functools.partial(self._log_fn, log="a"),
functools.partial(self._log_fn, log="b"),
functools.partial(self._log_fn, log="ab"),
functools.partial(self._log_fn, log="good"),
functools.partial(self._log_fn, log="bad"),
)
for _fn, _btn in [
(partial_fn_a, self.submit_btn_a),
(partial_fn_b, self.submit_btn_b),
(partial_fn_ab, self.submit_btn_ab),
(partial_fn_good, self.submit_btn_good),
(partial_fn_bad, self.submit_btn_bad),
]:
if _btn:
_btn.click(
_fn,
[self.saved_input, self.chatbot_state],
[self.chatbot, self.saved_input, self.chatbot_state],
show_api=False,
queue=False,
).then(
async_lambda(lambda x: x),
[self.saved_input],
[self.textbox],
show_api=False,
queue=False,
)
if self.undo_btn:
self.undo_btn.click(
self._delete_prev_fn,
[self.saved_input, self.chatbot_state],
[self.chatbot, self.saved_input, self.chatbot_state],
show_api=False,
queue=False,
).then(
async_lambda(lambda x: x),
[self.saved_input],
[self.textbox],
show_api=False,
queue=False,
)
if self.clear_btn:
self.clear_btn.click(
self._clear_fn,
[self.saved_input, self.chatbot_state],
[self.chatbot, self.saved_input, self.chatbot_state],
show_api=False,
queue=False,
).then(
async_lambda(lambda x: x),
[self.saved_input],
[self.textbox],
show_api=False,
queue=False,
)
def _setup_stop_events(self, event_triggers: list[Callable], event_to_cancel: Dependency) -> None:
if self.stop_btn and self.is_generator:
if self.submit_btn_one:
for event_trigger in event_triggers:
event_trigger(
async_lambda(
lambda: (
Button(visible=False),
Button(visible=True),
)
),
None,
[self.submit_btn_one, self.stop_btn],
show_api=False,
queue=False,
)
event_to_cancel.then(
async_lambda(lambda: (Button(visible=True), Button(visible=False))),
None,
[self.submit_btn_one, self.stop_btn],
show_api=False,
queue=False,
)
else:
for event_trigger in event_triggers:
event_trigger(
async_lambda(lambda: Button(visible=True)),
None,
[self.stop_btn],
show_api=False,
queue=False,
)
event_to_cancel.then(
async_lambda(lambda: Button(visible=False)),
None,
[self.stop_btn],
show_api=False,
queue=False,
)
self.stop_btn.click(
None,
None,
None,
cancels=event_to_cancel,
show_api=False,
)
def _setup_api(self) -> None:
if self.is_generator:
@functools.wraps(self.fn)
async def api_fn(message, history, *args, **kwargs): # type: ignore
if self.is_async:
generator = self.fn(message, history, *args, **kwargs)
else:
generator = await anyio.to_thread.run_sync(
self.fn, message, history, *args, **kwargs, limiter=self.limiter
)
generator = SyncToAsyncIterator(generator, self.limiter)
try:
first_response = await async_iteration(generator)
yield first_response, history + [[message, first_response]]
except StopIteration:
yield None, history + [[message, None]]
async for response in generator:
yield response, history + [[message, response]]
else:
@functools.wraps(self.fn)
async def api_fn(message, history, *args, **kwargs):
if self.is_async:
response = await self.fn(message, history, *args, **kwargs)
else:
response = await anyio.to_thread.run_sync(
self.fn, message, history, *args, **kwargs, limiter=self.limiter
)
history.append([message, response])
return response, history
self.fake_api_btn.click(
api_fn,
[self.textbox, self.chatbot_state] + self.additional_inputs,
[self.textbox, self.chatbot_state],
api_name="chat",
concurrency_limit=cast(Union[int, Literal["default"], None], self.concurrency_limit),
)
def _clear_and_save_textbox(self, message: str) -> tuple[str | dict, str]:
if self.multimodal:
return {"text": "", "files": []}, message
else:
return "", message
def _append_multimodal_history(
self,
message: dict[str, list],
response: str | None,
history: list[list[str | tuple | None]],
):
for x in message["files"]:
history.append([(x,), None])
if message["text"] is None or not isinstance(message["text"], str):
return
elif message["text"] == "" and message["files"] != []:
history.append([None, response])
else:
history.append([message["text"], response])
async def _display_input(
self, message: str | dict[str, list], history: list[list[str | tuple | None]]
) -> tuple[list[list[str | tuple | None]], list[list[str | tuple | None]]]:
if self.multimodal and isinstance(message, dict):
self._append_multimodal_history(message, None, history)
elif isinstance(message, str):
history.append([message, None])
return history, history
def _get_conversation_from_history(self, history):
conversation = ""
history[-1] = [history[-1][0], ""]
for idx, turn in enumerate(history):
conversation += self._get_chat_message(turn[0], role="system", turn=(idx + 1))
if turn[-1]:
conversation += self._get_chat_message(turn[-1], role="user", turn=(idx + 1))
return self.css + "<body>" + conversation + "</body>"
def _get_conversation_in_openai_format(self, history):
conversation = []
for idx, turn in enumerate(history):
roles = ["user", "assistant"]
if idx == len(turn) - 1:
roles = ["user"]
for role, content in zip(roles, turn):
conversation.append({"role": role, "content": content})
return conversation
@staticmethod
def _get_chat_message(message, role, turn):
if role == "user":
justify = "right"
else:
justify = "left"
return (
f'<div class="{role}-message" style="justify-content: {justify};">'
+ '<div class="message-content">'
+ f"<strong>Turn {turn} - {role.capitalize()}:</strong><br>"
+ f"<em>Length: {len(message)} characters</em><br><br>"
+ f'<div class="message-identifier">{message}</div>'
+ "</div></div>"
)
def _get_chat_message_comparison(self, content_a, content_b):
return (
'<div class="container">'
+ '<div class="column">'
+ self._get_chat_message(message=content_a, role="system", turn="A")
+ "</div>"
+ '<div class="column">'
+ self._get_chat_message(message=content_b, role="system", turn="B")
+ "</div>"
+ "</div>"
)
@staticmethod
def _check_if_two_responses(response):
if response:
matches = pattern.findall(response)
return matches
def _check_num_turns(self, history, generate=True):
if generate:
if len(history) >= self.max_turns:
raise Error(
f"We intend to collect conversations with a maximum of {self.max_turns}, please clear or log info first."
)
return history, history
else:
if len(history) < self.min_turns:
raise Error(
f"We intend to collect conversations with at least of {self.min_turns}, please continue the conversation first."
)
return history, history
@staticmethod
def _check_message(message):
if not message:
raise Error("Make sure to provide a message next time.")
async def _submit_fn(
self,
message: str | dict[str, list],
history_with_input: list[list[str | tuple | None]],
request: Request,
n_generations: int = 1,
*args,
) -> tuple[list[list[str | tuple | None]], list[list[str | tuple | None]]]:
if self.multimodal and isinstance(message, dict):
remove_input = len(message["files"]) + 1 if message["text"] is not None else len(message["files"])
history = history_with_input[:-remove_input]
else:
history = history_with_input[:-1]
self._check_message(message)
self._check_num_turns(history)
_, response = history_with_input[-1]
if self._check_if_two_responses(response):
raise Error("Two options detected: undo, log or random pick continuation.")
inputs, _, _ = special_args(self.fn, inputs=[message, history, *args], request=request)
async def _get_response():
if self.is_async:
response = await self.fn(*inputs)
else:
response = await anyio.to_thread.run_sync(self.fn, *inputs, limiter=self.limiter)
return response
if n_generations == 1:
response = await _get_response()
else:
response_one, response_two = await _get_response(), await _get_response()
response = self._get_chat_message_comparison(response_one, response_two)
if self.multimodal and isinstance(message, dict):
self._append_multimodal_history(message, response, history)
elif isinstance(message, str):
history.append([message, response])
return history, history
async def _stream_fn(
self,
message: str | dict[str, list],
history_with_input: list[list[str | tuple | None]],
request: Request,
n_generations: int = 1,
*args,
) -> AsyncGenerator:
if self.multimodal and isinstance(message, dict):
remove_input = len(message["files"]) + 1 if message["text"] is not None else len(message["files"])
history = history_with_input[:-remove_input]
else:
history = history_with_input[:-1]
self._check_message(message)
self._check_num_turns(history)
_, response = history_with_input[-1]
if self._check_if_two_responses(response):
raise Error("Two options detected: undo, log or random pick continuation.")
_, response = history_with_input[-1]
inputs, _, _ = special_args(self.fn, inputs=[message, history, *args], request=request)
try:
if self.is_async:
generator = self.fn(*inputs)
else:
generator = await anyio.to_thread.run_sync(self.fn, *inputs, limiter=self.limiter)
generator = SyncToAsyncIterator(generator, self.limiter)
first_response = await async_iteration(generator)
if n_generations == 2:
first_response_formatted = self._get_chat_message_comparison(first_response, "")
else:
first_response_formatted = first_response
if self.multimodal and isinstance(message, dict):
for x in message["files"]:
history.append([(x,), None])
update = history + [[message["text"], first_response_formatted]]
yield update, update
else:
update = history + [[message, first_response_formatted]]
yield update, update
except StopIteration:
if self.multimodal and isinstance(message, dict):
self._append_multimodal_history(message, None, history)
yield history, history
else:
update = history + [[message, None]]
yield update, update
async for response in generator:
if n_generations == 2:
response_formatted = self._get_chat_message_comparison(response, "")
else:
response_formatted = response
if self.multimodal and isinstance(message, dict):
update = history + [[message["text"], response_formatted]]
yield update, update
else:
update = history + [[message, response_formatted]]
yield update, update
if n_generations == 2:
if self.is_async:
generator_two = self.fn(*inputs)
else:
generator_two = await anyio.to_thread.run_sync(self.fn, *inputs, limiter=self.limiter)
generator_two = SyncToAsyncIterator(generator_two, self.limiter)
try:
first_response_two = await async_iteration(generator_two)
first_response_two_formatted = self._get_chat_message_comparison(response, first_response_two)
if self.multimodal and isinstance(message, dict):
for x in message["files"]:
history.append([(x,), None])
update = history + [[message["text"], first_response_two_formatted]]
yield update, update
else:
update = history + [[message, first_response_two_formatted]]
yield update, update
except StopIteration:
if self.multimodal and isinstance(message, dict):
self._append_multimodal_history(message, None, history)
yield history, history
else:
update = history + [[message, None]]
yield update, update
async for response_two in generator_two:
response_two = self._get_chat_message_comparison(response, response_two)
if self.multimodal and isinstance(message, dict):
update = history + [[message["text"], response_two]]
yield update, update
else:
update = history + [[message, response_two]]
yield update, update
async def _log_fn(
self, message: str | dict[str, list], history: list[list[str | tuple | None]], log: str
) -> tuple[
list[list[str | tuple | None]],
str | dict[str, list],
list[list[str | tuple | None]],
]:
self._check_num_turns(history, generate=False)
history_as_openai_format = self._get_conversation_in_openai_format(history)
feedback = {"prompt": history_as_openai_format}
prompt, response = history[-1]
matches = self._check_if_two_responses(response)
if matches and log != "prompt":
option_a, option_b = matches[0], matches[1]
if log == "a":
chosen, rejected = option_a, option_b
Info("Logged preference: a")
elif log == "b":
chosen, rejected = option_b, option_a
Info("Logged preference: b")
elif log == "ab":
options = [option_a, option_b]
chosen, rejected = random.choice([options])
Info("Picked random response to continue")
if log in ["a", "b"] and self.commit_scheduler:
feedback.update(
{
"chosen": [{"content": chosen, "role": "assistant"}],
"rejected": [{"content": rejected, "role": "assistant"}],
}
)
self._save_feedback(feedback)
elif log == "ab":
self._save_feedback(feedback)
history[-1] = [prompt, chosen]
return history, message or "", history
elif log in ["conversation", "good", "bad"]:
feedback.update({"response": response})
if log == "good":
feedback.update({"label": True})
elif log == "bad":
feedback.update({"label": False})
Info("Logged conversation")
self._save_feedback(feedback)
return history, "", history
else:
raise Error("Error in code w.r.t logging.")
async def _examples_fn(self, message: str, *args) -> list[list[str | None]]:
inputs, _, _ = special_args(self.fn, inputs=[message, [], *args], request=None)
if self.is_async:
response = await self.fn(*inputs)
else:
response = await anyio.to_thread.run_sync(self.fn, *inputs, limiter=self.limiter)
return [[message, response]]
async def _examples_stream_fn(
self,
message: str,
*args,
) -> AsyncGenerator:
inputs, _, _ = special_args(self.fn, inputs=[message, [], *args], request=None)
if self.is_async:
generator = self.fn(*inputs)
else:
generator = await anyio.to_thread.run_sync(self.fn, *inputs, limiter=self.limiter)
generator = SyncToAsyncIterator(generator, self.limiter)
async for response in generator:
yield [[message, response]]
async def _delete_prev_fn(
self,
message: str | dict[str, list],
history: list[list[str | tuple | None]],
) -> tuple[
list[list[str | tuple | None]],
str | dict[str, list],
list[list[str | tuple | None]],
]:
if self.multimodal and isinstance(message, dict):
remove_input = len(message["files"]) + 1 if message["text"] is not None else len(message["files"])
history = history[:-remove_input]
else:
history = history[:-1]
return history, message or "", history
async def _clear_fn(
self,
message: str | dict[str, list],
history: list[list[str | tuple | None]],
) -> tuple[
list[list[str | tuple | None]],
str | dict[str, list],
list[list[str | tuple | None]],
]:
_, response = history[-1]
if self._check_if_two_responses(response):
raise Error("First log preference or continue random.")
else:
await self._log_fn(message=message, history=history, log="prompt")
self._set_conversation_id()
return [], "", []