from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM
import gradio as gr
import torch

title = "🤖AI ChatBot"
description = "Building open-domain chatbots is a challenging area for machine learning research."
examples = [["How are you?"]]

model_name_or_path = "TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ"
model_basename = "Wizard-Vicuna-30B-Uncensored-GPTQ-4bit--1g.act.order"
use_triton = False

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)

# Load the 4-bit GPTQ checkpoint onto the first GPU.
model = AutoGPTQForCausalLM.from_quantized(
    model_name_or_path,
    model_basename=model_basename,
    use_safetensors=True,
    trust_remote_code=False,
    device="cuda:0",
    use_triton=use_triton,
    quantize_config=None,
)


def predict(input, history=[]):
    # Tokenize the new user message, appending the EOS token as a turn separator.
    new_user_input_ids = tokenizer.encode(
        input + tokenizer.eos_token, return_tensors="pt"
    )

    # Append the new user input tokens to the chat history.
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

    # Move the inputs to the same device as the model before generating.
    bot_input_ids = bot_input_ids.to("cuda:0")

    # Generate a response; the returned ids contain the whole conversation so far.
    history = model.generate(
        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
    ).tolist()

    # Decode the tokens and split the conversation on the EOS token
    # (for this LLaMA-based model that is "</s>", not "<|endoftext|>").
    response = tokenizer.decode(history[0]).split(tokenizer.eos_token)

    # Pair up consecutive turns as (user, bot) tuples for the Chatbot component.
    response = [
        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
    ]
    return response, history


gr.Interface(
    fn=predict,
    title=title,
    description=description,
    examples=examples,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    theme="finlaymacklon/boxy_violet",
).launch()