import os
import re
import torch
from threading import Thread
from typing import Iterator

from mongoengine import connect, Document, StringField, SequenceField
import gradio as gr
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextIteratorStreamer
from peft import PeftModel
from openai import OpenAI

# The openai>=1.0 SDK configures the key on a client object, not on the module.
client = OpenAI(api_key=os.environ.get("OPENAI_KEY"))

def generate_image(text):
    try:
        response = client.images.generate(
            model="dall-e-3",
            prompt="Create an illustration that accurately depicts the character and the setting of a story: " + text,
            n=1,
            size="1024x1024",
        )
    except Exception as error:
        print(str(error))
        raise gr.Error("An error occurred while generating the image. Please check your API key and try again.")
    return response.data[0].url

# Constants
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

# Shown above the chat; extended with a warning when no GPU is available.
DESCRIPTION = ""

# Custom stylesheet for the Blocks UI below; the original contents were not
# preserved, so an empty string keeps the interface unstyled but working.
custom_css = ""

LICENSE = """
---
As a derivative work of [Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) by Meta,
this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/LICENSE.txt)
and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/USE_POLICY.md).
"""

# GPU check: add a CPU warning to the description when no GPU is found.
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
" if torch.cuda.is_available(): # Model and Tokenizer Configuration model_id = "meta-llama/Llama-2-7b-chat-hf" bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=False, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16 ) base_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", quantization_config=bnb_config) model = PeftModel.from_pretrained(base_model, "ranamhamoud/storytell") tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token def make_prompt(entry): return f"### Human: When asked to explain use a story.Don't repeat the assesments, limit to 500 words.However keep context in mind if edits to the content is required. {entry} ### Assistant:" def process_text(text): text = re.sub(r'\[answer:\]\s*', 'Answer: ', text) text = re.sub(r'\[.*?\](? Iterator[str]: conversation = [] for user, assistant in chat_history: conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}]) conversation.append({"role": "user", "content": make_prompt(message)}) enc = tokenizer(make_prompt(message), return_tensors="pt", padding=True, truncation=True) input_ids = enc.input_ids.to(model.device) if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH: input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:] gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.") streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=False) generate_kwargs = dict( {"input_ids": input_ids}, streamer=streamer, max_new_tokens=max_new_tokens, do_sample=True, top_p=top_p, top_k=top_k, temperature=temperature, num_beams=1, repetition_penalty=repetition_penalty, ) t = Thread(target=model.generate, kwargs=generate_kwargs) t.start() outputs = [] for text in streamer: processed_text = process_text(text) outputs.append(processed_text) output = "".join(outputs) yield output final_story = "".join(outputs) image_url = generate_image(final_story) yield image_url chat_interface = gr.ChatInterface( fn=generate, fill_height=True, stop_btn=None, examples=[ ["Can you explain briefly to me what is the Python programming language?"], ["Could you please provide an explanation about the concept of recursion?"], ["Could you explain what a URL is?"] ], theme='shivi/calm_seafoam',autofocus=True ) # Gradio Web Interface with gr.Blocks(css=custom_css,theme='shivi/calm_seafoam',fill_height=True) as demo: chat_interface.render() # gr.Markdown(LICENSE) # Main Execution if __name__ == "__main__": demo.queue(max_size=20) demo.launch(share=True)