import os
import random
import io
import uuid
import urllib.request
from datetime import datetime

import requests
import gradio as gr
from PIL import Image
from huggingface_hub import InferenceClient

import agent
from models import models
from chat_models import models as c_models
#from utils import parse_action, parse_file_content, read_python_module_structure

# Load each image model as a Gradio endpoint and collect the chat model names.
loaded_model = []
chat_model = []
for model in models:
    loaded_model.append(gr.load(f'models/{model}'))
print(loaded_model)
for model_c in c_models:
    chat_model.append(model_c)
print(chat_model)

now = datetime.now()
date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")

#client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

history = []


def gen_from_infer(purpose, history, image, model_drop, chat_drop, choice, seed, im_seed):
    # Generate an image prompt with the chat model, then render it through the
    # Inference API; a seed of 0 means "pick a random one".
    history.clear()
    if seed == 0:
        seed = random.randint(1, 1111111111111111)
    if im_seed == 0:
        im_seed = random.randint(1, 1111111111111111)
    out_prompt = generate(purpose, history, chat_drop, seed)
    history.append((purpose, out_prompt))
    yield (history, None)
    infer_model = models[int(model_drop)]
    print(infer_model)
    infer = InferenceClient(f'{infer_model}')
    print(infer)
    out_img = infer.text_to_image(
        prompt=out_prompt,
        negative_prompt=None,
        height=512,
        width=512,
        num_inference_steps=None,
        guidance_scale=None,
        model=None,
        seed=im_seed,
    )
    yield (history, out_img)


def format_prompt(message, history, seed):
    # Build a Mixtral-style "[INST] ... [/INST]" prompt from the chat history.
    # If the current message already appears in the history, send the message
    # alone so it is not duplicated.
    prompt = ""
    repeated = False
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response} "
        print(f'MESSAGE :: {message}, USER_PROMPT :: {user_prompt}')
        if user_prompt == message:
            repeated = True
    if repeated:
        return f"[INST] {message} [/INST]"
    prompt += f"[INST] {message} [/INST]"
    return prompt


def run_gpt(in_prompt, history, model_drop, seed):
    client = InferenceClient(c_models[int(model_drop)])
    print(f'history :: {history}')
    prompt = format_prompt(in_prompt, history, seed)
    if seed == 0:
        seed = random.randint(1, 1111111111111111)
    print(seed)
    generate_kwargs = dict(
        temperature=1.0,
        max_new_tokens=1048,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    content = agent.GENERATE_PROMPT + prompt
    print(content)
    # Stream the generation token by token and concatenate the text.
    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
    resp = ""
    for response in stream:
        resp += response.token.text
    return resp


def run_idefics(in_prompt, history, image, model_drop, seed):
    send_list = []
    #client = InferenceClient("HuggingFaceM4/idefics-9b-instruct")
    client = InferenceClient("HuggingFaceM4/idefics-80b-instruct")
    print(f'history :: {history}')
    prompt = format_prompt(in_prompt, history, seed)
    seed = random.randint(1, 1111111111111111)
    print(seed)
    generate_kwargs = dict(
        temperature=1.0,
        max_new_tokens=512,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    generation_args = {
        "max_new_tokens": 256,
        "repetition_penalty": 1.0,
        # "<end_of_utterance>" marks the end of an IDEFICS turn.
        "stop_sequences": ["<end_of_utterance>", "\nUser:"],
    }
    #content = f'{agent.IDEFICS_PROMPT}' + "\nUser" + in_prompt + f' ![]({image})'
    #send_list.append(agent.IDEFICS_PROMPT)
    #send_list.append(prompt)
    #send_list.append(image)
    # Placeholder query: the user prompt and image are not passed through yet.
    content = "\nUser: What is in this image?![](https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG)\nAssistant:"
    print(content)
    stream = client.text_generation(prompt=content, **generation_args)
    #stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
    #resp = ""
    #for response in stream:
    #    resp += response.token.text
    print(stream)
    return stream


def generate(purpose, history, chat_drop, seed):
    print(history)
    out_prompt = run_gpt(purpose, history, chat_drop, seed)
    return out_prompt


def describe(purpose, history, image, chat_drop, seed):
    print(history)
    #purpose = f"{purpose},![]({image})"
    out_prompt = run_idefics(purpose, history, image, chat_drop, seed)
    return out_prompt


def run(purpose, history, image, model_drop, chat_drop, choice, seed):
    if choice == "Generate":
        out_prompt = generate(purpose, history, chat_drop, seed)
        history.append((purpose, out_prompt))
        yield (history, None)
        model = loaded_model[int(model_drop)]
        out_img = model(out_prompt)
        print(out_img)
        # The loaded model returns a file path on its own Space; fetch the
        # rendered image over HTTP.
        url = f'https://johann22-chat-diffusion-describe.hf.space/file={out_img}'
        print(url)
        uid = uuid.uuid4()
        #urllib.request.urlretrieve(image, 'tmp.png')
        #out = Image.open('tmp.png')
        r = requests.get(url, stream=True)
        if r.status_code == 200:
            out = Image.open(io.BytesIO(r.content))
            #yield ([(purpose, out_prompt)], out)
            yield (history, out)
        else:
            yield ([(purpose, "an error occurred")], None)
    if choice == "Describe":
        out_prompt = describe(purpose, history, image, chat_drop, seed)
        history.append((purpose, out_prompt))
        yield (history, None)
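# For illustration (an assumed example, not from the original code): with
# history [("a cat", "A fluffy tabby cat")] and the new message "a dog",
# format_prompt() returns
# "[INST] a cat [/INST] A fluffy tabby cat [INST] a dog [/INST]",
# the instruction format Mixtral-style chat models expect.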

################################################

style = """
.top_head{
    background: no-repeat;
    background-image: url(https://huggingface.co/spaces/johann22/chat-diffusion/resolve/main/image.png);
    background-position-y: bottom;
    height: 180px;
    background-position-x: center;
}
.top_h1{
    color: white!important;
    -webkit-text-stroke-width: medium;
}
"""

with gr.Blocks(css=style) as iface:
    gr.HTML("""
    <div class="top_head">
        <h1 class="top_h1">Mixtral Chat Diffusion</h1>
    </div>
    <h3><center>This chatbot will generate images</center></h3>
    """)
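    # A minimal sketch of the event wiring the handlers above need; the
    # component names and layout below are assumptions, not the original UI.
    with gr.Row():
        chatbot = gr.Chatbot()
        out_image = gr.Image()
    purpose_box = gr.Textbox(label="Prompt")
    image_in = gr.Image(label="Input image", type="filepath")
    choice = gr.Radio(choices=["Generate", "Describe"], value="Generate", label="Mode")
    model_drop = gr.Dropdown(choices=models, type="index", value=models[0], label="Image model")
    chat_drop = gr.Dropdown(choices=c_models, type="index", value=c_models[0], label="Chat model")
    seed = gr.Number(value=0, label="Seed (0 = random)")
    # run() is a generator, so Gradio streams the intermediate (history, None)
    # update before the final image arrives.
    purpose_box.submit(
        run,
        [purpose_box, chatbot, image_in, model_drop, chat_drop, choice, seed],
        [chatbot, out_image],
    )

iface.launch()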