import time

import gradio as gr
from ctransformers import AutoModelForCausalLM
from spellchecker import SpellChecker

# Load the quantized WizardLM model once at import time (streaming enabled).
llm = AutoModelForCausalLM.from_pretrained(
    "TheBloke/WizardLM-7B-uncensored-GGUF",
    model_file="WizardLM-7B-uncensored.Q3_K_M.gguf",
    model_type="llama",
    stream=True,
)

history = [""]  # running transcript shared across calls
spell = SpellChecker()


def correct_words(text):
    """Return *text* with each word replaced by its best spelling correction.

    Words the spell checker cannot correct (``spell.correction`` returns
    ``None`` for unknown words) are kept unchanged — previously the ``None``
    values made ``' '.join`` raise ``TypeError``.
    """
    words = text.split()
    corrected = [spell.correction(word) or word for word in words]
    return ' '.join(corrected)


def generate_response(message):
    """Yield the accumulated chat transcript after each generated response.

    Fixes vs. the original:
    * prompt tokens come from ``llm.tokenize`` instead of raw ``ord`` values,
      which are character codes, not valid model token ids;
    * generated token ids are decoded with ``llm.detokenize`` instead of
      being joined as decimal numbers;
    * the duplicated ``history = ["Chatbot:"]`` reset and the artificial
      2-second sleep are removed.
    """
    global history
    for _ in range(2):  # two responses per message; adjust as needed
        tokens = llm.tokenize(message)
        generated = llm.generate(
            tokens,
            top_k=50,
            top_p=0.95,
            temperature=1.0,
            repetition_penalty=1.0,
            last_n_tokens=1,
        )
        response_text = llm.detokenize(list(generated))
        corrected_response = correct_words(response_text)
        history.append(corrected_response)
        yield ' '.join(history)
    # Reset the transcript after the final response.
    history = ["Chatbot:"]


def chatbot(message, history):
    """Gradio ChatInterface callback: stream responses for *message*."""
    for response in generate_response(message):
        time.sleep(0.1)  # Optional delay for a natural chat feel
        yield response


iface = gr.ChatInterface(chatbot)
iface.launch()