"""Gradio chatbot backed by a LoRA fine-tuned Gemma 2B instruct model."""

import os

# IMPORTANT: backend selection must happen BEFORE `import keras` — Keras reads
# KERAS_BACKEND at import time, so setting it after the import has no effect.
os.environ["KERAS_BACKEND"] = "jax"  # you can also use tensorflow or torch
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "1"  # avoid memory fragmentation on JAX backend.

import gradio as gr
import keras
import keras_nlp
import numpy as np
import pandas as pd

# Fixed seed so sampling during generation is reproducible across runs.
keras.utils.set_random_seed(42)

# LoRA fine-tuned Gemma 2B instruct checkpoint pulled from the Hugging Face Hub.
gemma_lm = keras_nlp.models.CausalLM.from_preset(
    "hf://soufyane/gemma_2b_instruct_FT_DATA_SCIENCE_lora36_1"
)


def generate_answer(history, question):
    """Generate an answer to *question* and append it to the chat history.

    Args:
        history: List of ``(question, answer)`` tuples held by ``gr.Chatbot``.
        question: The user's latest question, taken from the textbox.

    Returns:
        The updated history list (mutated in place), which Gradio feeds back
        into the chatbot component.
    """
    # NOTE(review): `generate` typically echoes the prompt in its output —
    # the answer shown in the chat may include the instruction preamble.
    prompt = (
        "You are an AI Agent specialized to answer to questions about "
        "Data Science and be greatfull and nice and helpfull"
        f"\n\nQuestion:\n{question}\n\nAnswer:\n"
    )
    answer = gemma_lm.generate(prompt, max_length=1024)
    history.append((question, answer))
    return history


# Gradio interface: a chatbot panel with a single textbox; pressing Enter in
# the textbox calls generate_answer(history, question) and updates the chatbot.
with gr.Blocks() as demo:
    gr.Markdown("# Chatbot")
    chatbot = gr.Chatbot()
    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Enter your question here...")
    txt.submit(generate_answer, [chatbot, txt], chatbot)

# Launch the interface
demo.launch()