# Gradio provides the chat interface for the DPO fine-tuned Phi-2 model
import gradio as gr
import torch
from transformers import AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
from peft import AutoPeftModelForCausalLM
from threading import Thread

# Load the fine-tuned model and tokenizer
new_model = "adhisetiawan/phi2_DPO"
model = AutoPeftModelForCausalLM.from_pretrained(
    new_model,
    low_cpu_mem_usage=True,
    torch_dtype=torch.float16,
    load_in_4bit=True,
    device_map="cuda:0",  # 4-bit weights are placed here at load time; calling .to() on a quantized model raises an error
)
tokenizer = AutoTokenizer.from_pretrained(new_model)

# Define stopping criteria
class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [29, 0]  # Token IDs that end generation
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:
                return True
        return False

# Define the prediction function
def predict(message, history):
    # Transform history into the required format, with an empty slot for the new reply
    history_transformer_format = history + [[message, ""]]
    stop = StopOnTokens()

    # Format messages for the model using <human>/<bot> turn markers
    messages = "".join(["".join(["\n<human>:" + item[0], "\n<bot>:" + item[1]])
                        for item in history_transformer_format])
    model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")

    # Set up the streamer and run generation in a background thread
    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        top_p=0.95,
        top_k=1000,
        temperature=1.0,
        num_beams=1,
        stopping_criteria=StoppingCriteriaList([stop])
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    # Yield partial messages as they are generated
    partial_message = ""
    for new_token in streamer:
        if new_token != '<':  # Skip the '<' that opens a <human>/<bot> turn marker
            partial_message += new_token
            yield partial_message

# Launch the Gradio chat interface
gr.ChatInterface(predict).queue().launch(debug=True)
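
# --- Optional: query the running app programmatically ---
# A minimal sketch using Gradio's client library, assuming the default local URL
# that launch() prints and the standard "/chat" endpoint that ChatInterface exposes.
# Install the client with `pip install gradio_client` if needed.
from gradio_client import Client

client = Client("http://127.0.0.1:7860/")  # assumed local URL; replace with the one launch() reports
result = client.predict("What is DPO?", api_name="/chat")  # returns the full generated reply
print(result)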