|
|
|
|
|
import subprocess |
|
import json |
|
import requests |
|
import zlib |
|
import random |
|
from PIL import Image |
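# Install flash-attn at startup; FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE skips compiling the
# CUDA kernels locally, so the package installs quickly and the Space can start up fast.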
|
|
|
subprocess.run( |
|
f"pip install flash-attn --no-build-isolation", |
|
env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"}, |
|
shell=True, |
|
) |
|
|
|
import os |
|
from threading import Thread |
|
from typing import Iterator |
|
|
|
import gradio as gr |
|
import spaces |
|
import torch |
|
import logging |
|
import wikipedia |
|
import time |
|
import SelfExtend |
|
from transformers import ( |
|
AutoModelForCausalLM, |
|
AutoTokenizer, |
|
AutoProcessor, |
|
TextIteratorStreamer, |
|
) |
|
from transformers.dynamic_module_utils import get_imports |
|
from bs4 import BeautifulSoup |
|
from functools import lru_cache |
|
|
|
logging.basicConfig(level=logging.INFO) |
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
MAX_MAX_NEW_TOKENS = 8192 |
|
DEFAULT_MAX_NEW_TOKENS = 2048 |
|
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "123392")) |
|
|
|
DEFAULT_SYSTEM_PROMPT = """\ |
|
You are a helpful and intelligent AI, developed by Ghost X and named Ghost 8B Beta, often referred to as Ghost Beta. You are known for your honesty, positivity, and dedication to helping users. Your strength is understanding their needs and making insightful suggestions based on their knowledge and interests. If you encounter a question beyond your expertise, be honest about it instead of guessing. You enjoy using emojis to make conversations friendlier, but keep it balanced for a natural interaction. Let's engage in a meaningful conversation, keeping the user's language in mind.
|
|
|
Rely on the context, such as the time, to offer responses that feel relevant and natural in daily life. Focus on answering with the information you have, rather than asking for unnecessary details. |
|
""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
EXAMPLES = [ |
|
[{"text": "Write a formal email to a colleague explaining a delay in project delivery, while also proposing a solution to get back on track."}], |
|
[{"text": "Giải thích nguyên nhân dẫn đến việc tăng giá hàng hóa trong nền kinh tế hiện nay và đề xuất một số biện pháp để kiểm soát lạm phát."}], |
|
[{"text": "한국어를 처음 배우는 사람을 위해 한국어의 기본 문법 규칙을 간단히 설명하고, 연습 문제를 만들어 보세요."}], |
|
[{"text": "Describe el proceso de solicitud de una visa de estudiante para estudiar en una universidad en el extranjero, incluyendo los documentos requeridos y los pasos clave."}], |
|
[{"text": "Escreva um resumo das principais causas da desmatamento na Amazônia e proponha soluções para mitigar seus efeitos."}], |
|
[{"text": "请用中文解释如何使用Python编程语言进行数据分析,并列举三个常见的应用场景。"}], |
|
[{"text": "Rédigez un paragraphe sur les avantages et les inconvénients de l'apprentissage en ligne par rapport à l'éducation traditionnelle."}], |
|
[{"text": "Spiega il processo di traduzione di un testo letterario dall'italiano all'inglese, evidenziando le sfide culturali e linguistiche."}], |
|
[{"text": "Erstellen Sie eine detaillierte Anleitung zur Installation eines LAMP-Stacks auf einem Linux-Server und erläutern Sie die Verwendung jedes Bestandteils."}], |
|
[{"text": "日本語で自己紹介のメールを書いてください。仕事で初めて連絡を取る相手に、自分の役職と業務内容を説明してください。"}], |
|
[{"text": "Опишите процесс создания и использования базы данных для управления запасами на складе, включая ключевые функции и примеры SQL-запросов."}], |
|
[{"text": "Przedstaw krótki przewodnik po najważniejszych zabytkach Krakowa, podkreślając ich historyczne znaczenie."}], |
|
[{"text": "Schrijf een korte handleiding voor het opzetten van een crowdfundingcampagne, inclusief tips voor succes en valkuilen om te vermijden."}], |
|
[{"text": "एक निबंध लिखिए जिसमें सोशल मीडिया के उपयोग के फायदे और नुकसान पर चर्चा की गई हो, और यह कैसे समाज को प्रभावित कर रहा है।"}], |
|
[{"text": "Türkçe öğrenen yabancılar için Türk alfabesini ve en temel dilbilgisi kurallarını açıklayan bir kılavuz yazın."}], |
|
] |
|
|
|
random.shuffle(EXAMPLES) |
|
|
|
HEAD = """ |
|
<script> |
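// Periodically push the client's local time into the hidden #client_info textbox so the
// model can ground time-sensitive answers in the user's timezone.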
|
function schedule_updates() { |
|
const client_info_element = document.querySelector("#client_info textarea"); |
|
client_info_element.value = "The current time is " + new Date().toLocaleString('en-US', { |
|
dateStyle: 'full', |
|
timeStyle: 'short', |
|
}) |
|
client_info_element.dispatchEvent(new Event('input')); |
|
} |
|
|
|
function bootstrap() { |
|
setInterval(schedule_updates, 1000); |
|
}; |
|
|
|
bootstrap(); |
|
</script> |
|
""" |
|
|
|
DESCRIPTION = """\ |
|
# Ghost 8B Beta (β, 128k) |
|
|
|
**Ghost 8B Beta** outperforms leading models like Llama 3.1 8B Instruct and GPT-3.5 Turbo in lc_winrate scores. It also surpasses Claude 3 Opus, Claude 3 Sonnet, GPT-4, and Mistral Large in AlpacaEval 2.0 winrate scores. The model comes in two context-length versions, [8k](https://huggingface.co/spaces/lamhieu/ghost-8b-beta-8k) and [128k](https://huggingface.co/spaces/lamhieu/ghost-8b-beta-128k), both with built-in multilingual function support. See details about the model [here](https://ghost-x.org/docs/models/ghost-8b-beta), or download it from [HuggingFace](https://huggingface.co/ghost-x/ghost-8b-beta-1608).
|
|
|
Supported languages: 🇬🇧 English, 🇻🇳 Vietnamese, 🇰🇷 Korean, 🇪🇸 Spanish, 🇵🇹 Portuguese, 🇨🇳 Chinese, 🇫🇷 French, 🇮🇹 Italian, 🇩🇪 German, 🇯🇵 Japanese, 🇷🇺 Russian, 🇵🇱 Polish, 🇳🇱 Dutch, 🇮🇳 Hindi, 🇹🇷 Turkish, 🇮🇩 Indonesian. |
|
|
|
Note: attached images are described by a separate captioning model; they are not processed by the Ghost 8B Beta model directly.
|
""" |
|
|
|
|
|
PLACEHOLDER = """ |
|
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;"> |
|
<h1 style="font-size: 26px; margin-bottom: 2px; opacity: 0.20;">👋 Welcome to the Ghost 8B Beta Playground! 🎉</h1> |
|
<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.10;">Ask me anything and let's have some fun! 🤔💡</p> |
|
</div> |
|
""" |
|
|
|
LICENSE = """ |
|
<p/> |
|
|
|
--- |
|
Ghost 8B Beta may give inaccurate information, including information about people, so please verify Ghost 8B Beta's answers. [Ghost 8B Beta](https://ghost-x.org/docs/models/ghost-8b-beta/) by [Ghost X](https://ghost-x.org). |
|
""" |
|
|
|
if not torch.cuda.is_available(): |
|
DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>" |
|
|
|
|
|
def workaround_fixed_get_imports(filename: str | os.PathLike) -> list[str]: |
|
""" |
|
    Workaround for transformers' get_imports when loading Florence-2 without flash_attn.
|
|
|
@args: |
|
filename (str | os.PathLike): The filename or path to the file. |
|
|
|
@returns: |
|
list[str]: The list of imports. |
|
|
|
@remarks: |
|
        - This works around Florence-2's modeling file, which declares a "flash_attn" import that is not needed here.
|
- It checks if the filename ends with "/modeling_florence2.py". |
|
- If it doesn't, it calls the original get_imports function. |
|
- If it does, it calls the original get_imports function and removes the "flash_attn" import. |
|
|
|
@usage: |
|
```python |
|
from unittest.mock import patch |
|
image_torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 |
|
with patch( |
|
"transformers.dynamic_module_utils.get_imports", workaround_fixed_get_imports |
|
): |
|
``` |
|
""" |
|
|
|
if not str(filename).endswith("/modeling_florence2.py"): |
|
return get_imports(filename) |
|
imports = get_imports(filename) |
|
imports.remove("flash_attn") |
|
return imports |
|
|
|
|
|
if torch.cuda.is_available(): |
|
    hf_secret = os.getenv("HF_TOKEN", None)
|
attn_implementation = "flash_attention_2" |
|
|
|
chat_model_id = "ghost-x/ghost-8b-beta-1608" |
|
chat_device = torch.device("cuda") |
|
chat_model = AutoModelForCausalLM.from_pretrained( |
|
chat_model_id, |
|
device_map="auto", |
|
torch_dtype=torch.bfloat16, |
|
attn_implementation=attn_implementation, |
|
trust_remote_code=True, |
|
        token=hf_secret,
|
) |
|
chat_tokenizer = AutoTokenizer.from_pretrained( |
|
chat_model_id, |
|
trust_remote_code=True, |
|
        token=hf_secret,
|
) |
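    # SelfExtend (the LongLM "self-extend" technique) stretches the usable context window at
    # inference time via grouped attention over distant positions; group_size and window_size
    # control the grouped span and the normal neighbor window, respectively.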
|
SelfExtend.apply( |
|
chat_model, |
|
group_size=16, |
|
window_size=512, |
|
enable_flash_attention=True, |
|
flash_attention_impl="flash_attn", |
|
) |
|
chat_model.generation_config.max_length = 123392 |
|
|
|
image_model_id = "microsoft/Florence-2-large" |
|
|
|
|
|
image_device = "cpu" |
|
image_torch_dtype = torch.float32 |
|
image_model = ( |
|
AutoModelForCausalLM.from_pretrained( |
|
image_model_id, |
|
torch_dtype=image_torch_dtype, |
|
trust_remote_code=True, |
|
            token=hf_secret,
|
) |
|
.to(image_device) |
|
.eval() |
|
) |
|
image_processor = AutoProcessor.from_pretrained( |
|
image_model_id, |
|
trust_remote_code=True, |
|
        token=hf_secret,
|
) |
|
|
|
|
|
waiting_tools_timeout = 5 |
|
supported_tools = json.dumps( |
|
[ |
|
{ |
|
"type": "function", |
|
"function": { |
|
"name": "search_on_internet", |
|
"description": "Use this tool to search for information on the internet to answer questions you are unsure about, don't know or need the latest information (e.g. news, reports, companies, people,...) to give the most accurate results. Note: can only be used or ignored, not asked again", |
|
"parameters": { |
|
"type": "object", |
|
"properties": { |
|
"keyword": { |
|
"type": "string", |
|
"description": "Search keywords, rephrase to optimize search results based on questions suitable to the specified search type.", |
|
"required": True, |
|
}, |
|
"type": { |
|
"type": "string", |
|
"description": "Search type, based on the question to determine whether to search for it in 'wikipedia' or 'google', prefer to use wikipedia for information about events, history and people.", |
|
"enum": ["wikipedia", "google"], |
|
"default": "google", |
|
"required": True, |
|
}, |
|
"language": { |
|
"type": "string", |
|
"description": "Search language, is the user language code with 2 letters, e.g: vi = vietnamese, en = english.", |
|
"default": "en", |
|
"required": True, |
|
}, |
|
}, |
|
}, |
|
}, |
|
} |
|
], |
|
ensure_ascii=False, |
|
) |
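# When tools are enabled, the model is expected to answer with a JSON tool call that the
# code below parses, e.g. (illustrative shape only):
#   {"type": "function", "name": "search_on_internet",
#    "arguments": {"keyword": "...", "type": "google", "language": "en"}}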
|
|
|
|
|
@lru_cache(maxsize=128) |
|
def extract_text_from_webpage(html_content): |
|
""" |
|
Extracts visible text from an HTML webpage. |
|
|
|
@args: |
|
html_content (str): The HTML content of the webpage. |
|
|
|
@returns: |
|
str: The visible text extracted from the webpage. |
|
|
|
@remarks: |
|
- This function uses the BeautifulSoup library to parse the HTML content. |
|
- It removes certain tags (script, style, header, footer, nav, form, svg) from the parsed HTML. |
|
- The remaining visible text is then extracted using the `get_text` method of BeautifulSoup. |
|
- The extracted text is stripped of leading/trailing whitespace and separated by a single space. |
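
    @usage:
        A minimal sketch; `html` below is an illustrative HTML string:
        ```python
        html = "<html><body><nav>menu</nav><p>Hello, world.</p></body></html>"
        print(extract_text_from_webpage(html))  # -> "Hello, world."
        ```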
|
""" |
|
|
|
soup = BeautifulSoup(html_content, "html.parser") |
|
for tag in soup(["script", "style", "header", "footer", "nav", "form", "svg"]): |
|
tag.extract() |
|
visible_text = soup.get_text(strip=True, separator=" ") |
|
return visible_text |
|
|
|
|
|
def search_with_wikipedia( |
|
query: str, |
|
language: str = "en", |
|
): |
|
""" |
|
Search for a given query on Wikipedia and return the summary. |
|
|
|
@args: |
|
query (str): The search query. |
|
language (str, optional): The language code for the Wikipedia page. Defaults to "en". |
|
|
|
@returns: |
|
list: A list containing the summary of the Wikipedia page. |
|
|
|
@remarks: |
|
- This function uses the Wikipedia API to search for the given query. |
|
- The language parameter determines the language of the Wikipedia page to search. |
|
- If the search is successful, the function returns a list containing the summary of the page. |
|
- If an exception occurs during the search, an empty list is returned. |
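
    @usage:
        A minimal sketch; the query and language are illustrative values:
        ```python
        results = search_with_wikipedia(query="Alan Turing", language="en")
        # results is either [summary_text] or [] if the lookup failed.
        ```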
|
""" |
|
|
|
all_results = [] |
|
try: |
|
wikipedia.set_lang(language) |
|
all_results.append(wikipedia.summary(query)) |
|
    except Exception:
|
pass |
|
return all_results |
|
|
|
|
|
def search_with_google( |
|
query: str, |
|
num_results: int = 3, |
|
timeout: int = 5, |
|
language: str = "en", |
|
    ssl_verify: bool | None = None,
|
): |
|
""" |
|
Searches Google for the given query and returns a list of search results. |
|
|
|
@args: |
|
query (str): The search query. |
|
num_results (int, optional): The number of search results to retrieve. Defaults to 3. |
|
timeout (int, optional): The timeout value for the HTTP requests. Defaults to 5. |
|
language (str, optional): The language for the search results. Defaults to "en". |
|
ssl_verify (bool, optional): Whether to verify SSL certificates. Defaults to None. |
|
|
|
@returns: |
|
list: A list of dictionaries containing the link and visible text of each search result. |
|
|
|
@remarks: |
|
- This function uses the requests library to send HTTP requests to Google. |
|
- It sets the User-Agent header to mimic a Firefox browser. |
|
- The search results are retrieved from the HTML response using BeautifulSoup. |
|
- Each search result is represented as a dictionary with "link" and "text" keys. |
|
- The "link" key contains the URL of the search result. |
|
- The "text" key contains the visible text extracted from the search result webpage. |
|
- If the visible text exceeds 4096 characters, it is truncated to that length. |
|
- If an error occurs while fetching or processing a search result, it is printed and ignored. |
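
    @usage:
        A minimal sketch; the query is an illustrative value and network access is assumed:
        ```python
        results = search_with_google(query="latest AI news", num_results=3, language="en")
        for item in results:
            print(item["link"], item["text"][:80])
        ```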
|
""" |
|
|
|
|
|
all_results = [] |
|
|
|
|
|
max_chars_per_page = 4096 |
|
|
|
|
|
with requests.Session() as session: |
|
|
|
resp = session.get( |
|
url="https://www.google.com/search", |
|
headers={ |
|
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0" |
|
}, |
|
params={ |
|
"q": query, |
|
"num": num_results, |
|
"udm": 14, |
|
"hl": language, |
|
}, |
|
timeout=timeout, |
|
verify=ssl_verify, |
|
) |
|
|
|
|
|
resp.raise_for_status() |
|
|
|
|
|
soup = BeautifulSoup(resp.text, "html.parser") |
|
|
|
|
|
result_block = soup.find_all("div", attrs={"class": "g"}) |
|
|
|
|
|
for result in result_block: |
|
|
|
link = result.find("a", href=True) |
|
|
|
|
|
if link: |
|
link = link["href"] |
|
try: |
|
|
|
webpage = session.get( |
|
link, |
|
headers={ |
|
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0" |
|
}, |
|
) |
|
|
|
|
|
webpage.raise_for_status() |
|
|
|
|
|
visible_text = extract_text_from_webpage(webpage.text) |
|
|
|
|
|
if len(visible_text) > max_chars_per_page: |
|
visible_text = visible_text[:max_chars_per_page] |
|
|
|
|
|
all_results.append({"link": link, "text": visible_text}) |
|
except requests.exceptions.RequestException as e: |
|
|
|
print(f"Error fetching or processing {link}: {e}") |
|
|
|
|
|
|
return all_results |
|
|
|
|
|
@lru_cache(maxsize=128) |
|
def extract_text_from_image(file: str) -> str: |
|
""" |
|
Extracts text from an image file. |
|
|
|
@args: |
|
file (str): The path or URL of the image file. |
|
|
|
@returns: |
|
str: The extracted text from the image. |
|
|
|
@remarks: |
|
- This function uses an LRU cache to store previously processed images for faster retrieval. |
|
- The image file can be either a local file path or a URL. |
|
- The function opens the image file using the PIL library. |
|
- The function processes the image using an image processor. |
|
- The processed image is then passed to a text generation model to generate text. |
|
- The generated text is post-processed to obtain the final extracted text. |
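
    @usage:
        A minimal sketch; the file path is an illustrative value (a local path or an http(s) URL):
        ```python
        caption = extract_text_from_image(file="/tmp/example.jpg")
        print(caption)  # detailed caption generated by the image model
        ```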
|
""" |
|
|
|
task = "<MORE_DETAILED_CAPTION>" |
|
image = Image.open( |
|
requests.get(file, stream=True).raw |
|
if file.startswith("http") |
|
else open(file, "rb") |
|
) |
|
|
|
if image.mode != "RGB": |
|
image = image.convert("RGB") |
|
|
|
|
|
inputs = image_processor(text=task, images=image, return_tensors="pt").to( |
|
"cpu", image_torch_dtype |
|
) |
|
|
|
|
|
generated_ids = image_model.generate( |
|
input_ids=inputs["input_ids"], |
|
pixel_values=inputs["pixel_values"], |
|
max_new_tokens=1024, |
|
num_beams=3, |
|
do_sample=False, |
|
) |
|
|
|
|
|
generated_text = image_processor.batch_decode( |
|
generated_ids, skip_special_tokens=False |
|
)[0] |
|
parsed_answer = image_processor.post_process_generation( |
|
generated_text, |
|
task=task, |
|
image_size=(image.width, image.height), |
|
) |
|
|
|
|
|
return parsed_answer[task] |
|
|
|
|
|
@spaces.GPU(duration=90) |
|
def generate_chat( |
|
uuid: str, |
|
message: dict, |
|
chat_history: list[tuple[str, str]], |
|
allow_used_tools: bool = True, |
|
system_prompt: str = "", |
|
max_new_tokens: int = 1536, |
|
temperature: float = 0.4, |
|
top_p: float = 0.95, |
|
top_k: int = 50, |
|
repetition_penalty: float = 1.0, |
|
client_info: str = None, |
|
) -> Iterator[str]: |
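    """
    Stream a chat completion from Ghost 8B Beta.

    Builds the prompt (optionally advertising the supported tools and any gathered
    references), then streams generated tokens. When tool use is enabled and the model's
    first pass produces a valid `search_on_internet` JSON call within the waiting window,
    the search is executed and generation is re-run with the results as references.
    """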
|
|
|
def build_input_ids( |
|
system_prompt: str = "", |
|
apply_tools: bool = None, |
|
references=None, |
|
): |
|
conversation = [] |
|
|
|
|
|
if system_prompt: |
|
if system_prompt.strip() == DEFAULT_SYSTEM_PROMPT.strip(): |
|
system_prompt = system_prompt.strip() + "\n\n" + client_info + "\n" |
|
conversation.append({"role": "system", "content": system_prompt}) |
|
|
|
|
|
if apply_tools is True: |
|
conversation.append({"role": "tools", "content": supported_tools}) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if ( |
|
references is not None |
|
and isinstance(references, list) |
|
and len(references) > 0 |
|
): |
|
formatted_references = f"Analyze the provided references, extract relevant information to provide accurate and objective feedback. This reference information may include: conversation context, assistant or user memories, reasoning guides, problem-solving suggestions, assistant rules, etc.\nIf the reference is not relevant, ignore it. Try to have a balanced approach, avoiding over-reliance on the documentation." |
|
formatted_references += "\n\n" + json.dumps( |
|
references, indent=2, ensure_ascii=False |
|
) |
|
conversation.append( |
|
{ |
|
"role": "refs", |
|
"content": formatted_references, |
|
} |
|
) |
|
|
|
|
|
for user, assistant in chat_history: |
|
conversation.extend( |
|
[ |
|
{"role": "user", "content": user}, |
|
{"role": "assistant", "content": assistant}, |
|
] |
|
) |
|
|
|
|
|
conversation.append( |
|
{ |
|
"role": "user", |
|
"content": ( |
|
f"{' & '.join(message['attachments'])}\n\n{message['text']}" |
|
if "attachments" in message and len(message["attachments"]) > 0 |
|
else f"{message['text']}" |
|
), |
|
} |
|
) |
|
|
|
logger.info(f"UUID: {uuid} - Conversation: {conversation}") |
|
|
|
|
|
input_ids = chat_tokenizer.apply_chat_template( |
|
conversation, add_generation_prompt=True, return_tensors="pt" |
|
) |
|
input_ids = input_ids.to(chat_model.device) |
|
|
|
|
|
if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH: |
|
input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:] |
|
gr.Warning( |
|
f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens." |
|
) |
|
return input_ids |
|
|
|
|
|
def generate_chat_responses( |
|
previous_response: str = None, |
|
): |
|
document_references = [] |
|
|
|
|
|
if previous_response is not None: |
|
scheduled_tools_runs = None |
|
try: |
|
scheduled_tools_runs = json.loads(previous_response) |
|
if scheduled_tools_runs["type"] == "function" and scheduled_tools_runs[ |
|
"name" |
|
] in ["search_on_internet"]: |
|
pass |
|
else: |
|
scheduled_tools_runs = None |
|
except Exception as e: |
|
print(e) |
|
pass |
|
|
|
|
|
if ( |
|
scheduled_tools_runs is not None |
|
and scheduled_tools_runs["name"] == "search_on_internet" |
|
): |
|
keyword = scheduled_tools_runs["arguments"]["keyword"] |
|
search_type = scheduled_tools_runs["arguments"]["type"] |
|
language = scheduled_tools_runs["arguments"]["language"] |
|
|
|
|
|
if search_type == "wikipedia": |
|
gr.Info("Searching for information on the Wikipedia.") |
|
document_references.extend( |
|
search_with_wikipedia(query=keyword, language=language) |
|
) |
|
|
|
|
|
gr.Info("Searching for information on the Google.") |
|
document_references.extend( |
|
search_with_google( |
|
query=keyword, |
|
language=language, |
|
num_results=3, |
|
) |
|
) |
|
print("document_references:", document_references) |
|
|
|
|
|
        apply_tools = allow_used_tools is True and previous_response is None
|
|
|
|
|
input_ids = build_input_ids( |
|
system_prompt=system_prompt, |
|
apply_tools=apply_tools, |
|
references=document_references, |
|
) |
|
|
|
|
|
streamer = TextIteratorStreamer( |
|
chat_tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True |
|
) |
|
|
|
|
|
generate_kwargs = dict( |
|
input_ids=input_ids, |
|
streamer=streamer, |
|
max_new_tokens=max_new_tokens, |
|
do_sample=True, |
|
repetition_penalty=repetition_penalty, |
|
) |
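        # temperature == 0 means greedy decoding; otherwise sample with the requested
        # temperature, top-p and top-k.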
|
if temperature == 0: |
|
generate_kwargs["do_sample"] = False |
|
else: |
|
generate_kwargs["temperature"] = temperature |
|
generate_kwargs["top_p"] = top_p |
|
generate_kwargs["top_k"] = top_k |
|
|
|
|
|
t = Thread(target=chat_model.generate, kwargs=generate_kwargs) |
|
t.start() |
|
|
|
logger.info( |
|
f"UUID: {uuid} - Is apply tools: {apply_tools} - Is apply documents: {len(document_references) > 0} - Is previous response: {previous_response is not None} - Start generating chat responses" |
|
) |
|
|
|
state = { |
|
"mark": None, |
|
"respond": False, |
|
} |
|
outputs = [] |
|
for text in streamer: |
|
if state["mark"] is None: |
|
state["mark"] = time.time() |
|
outputs.append(text) |
|
if ( |
|
apply_tools is False |
|
or state["mark"] + waiting_tools_timeout < time.time() |
|
): |
|
state["respond"] = True |
|
yield "".join(outputs) |
|
|
|
|
|
if ( |
|
apply_tools is True |
|
and state["respond"] is False |
|
and state["mark"] + waiting_tools_timeout > time.time() |
|
): |
|
previous_response = "".join(outputs) |
|
yield from generate_chat_responses(previous_response=previous_response) |
|
|
|
|
|
yield from generate_chat_responses(previous_response=None) |
|
|
|
|
|
def generate( |
|
message: dict, |
|
chat_history: list[tuple[str, str]], |
|
allow_used_tools: bool = True, |
|
system_prompt: str = "", |
|
max_new_tokens: int = 1536, |
|
temperature: float = 0.4, |
|
top_p: float = 0.95, |
|
top_k: int = 50, |
|
repetition_penalty: float = 1.0, |
|
client_info: str = None, |
|
) -> Iterator[str]: |
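    """
    Entry point used by the Gradio ChatInterface.

    Assigns a request UUID, converts up to two attached images into text descriptions via
    the image model, folds image-only turns in the history into the following text turn,
    then delegates streaming generation to `generate_chat`.
    """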
|
|
|
uuid = zlib.crc32(str.encode(str(time.time()))) |
|
logger.info(f"UUID: {uuid} - Starting image text extraction process") |
|
|
|
|
|
if len(message["files"]) > 2: |
|
gr.Warning("Only the first 2 images will be processed.") |
|
|
|
message["files"] = message["files"][:2] |
|
|
|
|
|
message["attachments"] = handle_file_extraction( |
|
files=list(message["files"]), uuid=uuid |
|
) |
|
logger.info(f"UUID: {uuid} - Image text extraction process completed") |
|
|
|
logger.info(f"UUID: {uuid} - Previous chat history: {chat_history}") |
|
for idx, chat_pair in enumerate(chat_history): |
|
user_message, assistant_message = chat_pair |
|
if not isinstance(user_message, str) and assistant_message is None: |
|
text_descriptions = handle_file_extraction( |
|
files=list(user_message), uuid=uuid |
|
) |
|
chat_input = ( |
|
f"{' & '.join(text_descriptions)}\n\n{chat_history[idx + 1][0]}" |
|
) |
|
chat_history[idx + 1][0] = chat_input |
|
chat_history[idx] = [None, None] |
|
logger.info( |
|
f"UUID: {uuid} - Updated chat history: {chat_history} - Updated chat input: {chat_input}" |
|
) |
|
|
|
chat_history = list( |
|
filter(lambda x: x[0] is not None and x[1] is not None, chat_history) |
|
) |
|
logger.info(f"UUID: {uuid} - Filtered chat history: {chat_history}") |
|
|
|
yield from generate_chat( |
|
uuid=uuid, |
|
message=message, |
|
chat_history=chat_history, |
|
allow_used_tools=allow_used_tools, |
|
system_prompt=system_prompt, |
|
max_new_tokens=max_new_tokens, |
|
temperature=temperature, |
|
top_p=top_p, |
|
top_k=top_k, |
|
repetition_penalty=repetition_penalty, |
|
client_info=client_info, |
|
) |
|
|
|
|
|
def handle_file_extraction(files: list[str], uuid: str): |
|
""" |
|
    Extracts text from the given image files and returns a list of attachment strings.
|
|
|
@args: |
|
        files (list[str]): Paths (or URLs) of the image files to extract text from.
|
uuid (str): The UUID associated with the extraction process. |
|
|
|
@returns: |
|
list: A list of attachments, each represented as a string. |
|
|
|
    @remarks:

        - This function iterates over the given files and extracts text from each image.
|
- The extracted text is logged along with the UUID and file information. |
|
- The extracted text is then added to the attachments list as a string representation of an attachment. |
|
- The attachments list is returned at the end of the function. |
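
    @usage:
        A minimal sketch; the file path and uuid are illustrative values:
        ```python
        attachments = handle_file_extraction(files=["/tmp/example.jpg"], uuid="demo-uuid")
        # attachments[0] looks like: <attachment index="0" type="image" description="..." />
        ```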
|
""" |
|
|
|
attachments = [] |
|
for idx, file_to_extract in enumerate(files): |
|
extracted_text = extract_text_from_image(file=file_to_extract) |
|
logger.info( |
|
f"UUID: {uuid} - File: {file_to_extract} - Extracted text: {extracted_text}" |
|
) |
|
attachments.append( |
|
f'<attachment index="{idx}" type="image" description="{extracted_text}" />' |
|
) |
|
return attachments |
|
|
|
|
|
chatbot = gr.Chatbot( |
|
height=500, |
|
placeholder=PLACEHOLDER, |
|
label="Ghost 8B Beta (β, 128k)", |
|
show_copy_button=True, |
|
) |
|
|
|
chat_interface = gr.ChatInterface( |
|
fn=generate, |
|
chatbot=chatbot, |
|
fill_height=True, |
|
multimodal=True, |
|
textbox=gr.MultimodalTextbox( |
|
file_types=["image"], |
|
placeholder="Type a message...", |
|
), |
|
additional_inputs=[ |
|
gr.Checkbox( |
|
label="Allow used tools (available: search on internet)", |
|
value=False, |
|
), |
|
gr.Textbox(label="System prompt", lines=6, value=DEFAULT_SYSTEM_PROMPT), |
|
gr.Slider( |
|
label="Max new tokens", |
|
minimum=1, |
|
maximum=MAX_MAX_NEW_TOKENS, |
|
step=1, |
|
value=DEFAULT_MAX_NEW_TOKENS, |
|
), |
|
gr.Slider( |
|
label="Temperature", |
|
minimum=0.0, |
|
maximum=2.0, |
|
step=0.1, |
|
value=0.4, |
|
), |
|
gr.Slider( |
|
label="Top-p (nucleus sampling)", |
|
minimum=0.05, |
|
maximum=1.0, |
|
step=0.05, |
|
value=0.95, |
|
), |
|
gr.Slider( |
|
label="Top-k", |
|
minimum=1, |
|
maximum=100, |
|
step=1, |
|
value=50, |
|
), |
|
gr.Slider( |
|
label="Repetition penalty", |
|
minimum=1.0, |
|
maximum=2.0, |
|
step=0.05, |
|
value=1.0, |
|
), |
|
gr.Textbox( |
|
elem_id="client_info", |
|
label="Client info", |
|
lines=1, |
|
value="The current time is {}".format( |
|
time.strftime("%A, %D %B %Y %H:%M:%S") |
|
), |
|
visible=False, |
|
), |
|
], |
|
additional_inputs_accordion=gr.Accordion(label="Additional Inputs", open=True), |
|
stop_btn="Stop", |
|
cache_examples=False, |
|
examples=EXAMPLES, |
|
examples_per_page=8, |
|
concurrency_limit=100, |
|
) |
|
|
|
with gr.Blocks(fill_height=True, css="style.css", head=HEAD) as demo: |
|
gr.Markdown(DESCRIPTION) |
|
chat_interface.render() |
|
gr.Markdown(LICENSE) |
|
|
|
if __name__ == "__main__": |
|
demo.queue().launch(share=True) |
|
|