import gradio as gr
# from huggingface_hub import InferenceClient
import torch
import bitsandbytes  # needed at runtime for 4-bit (load_in_4bit) quantization
from unsloth import FastLanguageModel
from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
from threading import Thread

# Load the LoRA model in 4-bit and switch Unsloth to its faster inference mode.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "jjsprockel/Patologia_lora_model1",
    max_seq_length = 2048,
    dtype = None,  # auto-detect (float16 or bfloat16)
    load_in_4bit = True,
)
FastLanguageModel.for_inference(model)


class StopOnTokens(StoppingCriteria):
    """Stop generation once the last emitted token is one of the stop ids."""

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [29, 0]  # model-specific end-of-turn token ids
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:
                return True
        return False


def predict(message, history):
    history_transformer_format = history + [[message, ""]]
    stop = StopOnTokens()

    # Flatten the chat history into the "\n<human>: ... \n<bot>: ..." prompt format.
    messages = "".join(
        "".join(["\n<human>:" + item[0], "\n<bot>:" + item[1]])
        for item in history_transformer_format
    )

    model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")
    streamer = TextIteratorStreamer(
        tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
    )
    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=2048,
        # do_sample=True,
        # top_p=0.95,
        # top_k=1000,
        # temperature=1.0,
        # num_beams=1,
        stopping_criteria=StoppingCriteriaList([stop]),
    )

    # Run generation in a background thread so tokens can be yielded to the
    # UI as the streamer produces them.
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    partial_message = ""
    for new_token in streamer:
        if new_token != "<":  # skip the bare "<" of a partially decoded "<bot>" tag
            partial_message += new_token
            yield partial_message


gr.ChatInterface(predict).launch(debug=True)
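
# Optional smoke test (a sketch; the question below is only a placeholder):
# since `predict` is a plain generator, the streaming path can be exercised
# without the Gradio UI. Uncomment and run in place of the launch() call above.
#
# history = []
# last = ""
# for partial in predict("What are the typical histologic findings of adenocarcinoma?", history):
#     last = partial  # each iteration yields the transcript accumulated so far
# print(last)  # the fully streamed answer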