import gradio as gr
import spaces
import os
import gc
import random
import warnings

warnings.filterwarnings("ignore")

import numpy as np
import pandas as pd

pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
pd.set_option("display.width", 1000)

from tqdm.auto import tqdm
import torch
import torch.nn as nn
import tokenizers
import transformers

# Log library versions for easier debugging on the Space.
print(f"tokenizers.__version__: {tokenizers.__version__}")
print(f"transformers.__version__: {transformers.__version__}")
print(f"torch.__version__: {torch.__version__}")
print(f"torch cuda version: {torch.version.cuda}")

# Model-loading imports, needed once inference is wired up.
from transformers import AutoTokenizer, AutoConfig
from transformers import BitsAndBytesConfig, AutoModelForCausalLM, MistralForCausalLM
from peft import LoraConfig, get_peft_model

title = "H2O AI Predict the LLM"

# Theme from: https://huggingface.co/spaces/trl-lib/stack-llama/blob/main/app.py
theme = gr.themes.Monochrome(
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
    radius_size=gr.themes.sizes.radius_sm,
    font=[gr.themes.GoogleFont("Open Sans"), "ui-sans-serif", "system-ui", "sans-serif"],
)


def do_submit(question, response):
    # Concatenate the question and the LLM response into a single input string.
    full_text = question + " " + response
    # result = do_inference(full_text)  # inference not wired up yet; see the sketch at the end of this file
    return "result"  # placeholder output until do_inference is hooked in


with gr.Blocks(title=title) as demo:  # theme=theme
    # Load the example question/response pairs and pick two at random.
    sample_examples = pd.read_csv("sample_examples.csv")
    example_list = sample_examples[["Question", "Response", "target"]].sample(2).values.tolist()

    gr.Markdown(f"## {title}")
    with gr.Row():
        # with gr.Column(scale=1):
        #     gr.Markdown("### Question and LLM Response")
        question_text = gr.Textbox(lines=2, placeholder="Question:", label="")
        response_text = gr.Textbox(lines=2, placeholder="Response:", label="")
        # Hidden ground-truth field, populated only by the examples below.
        target_text = gr.Textbox(lines=1, placeholder="Target:", label="", interactive=False, visible=False)
        llm_num = gr.Textbox(value="", label="LLM #")
    with gr.Row():
        sub_btn = gr.Button("Submit")
        sub_btn.click(fn=do_submit, inputs=[question_text, response_text], outputs=[llm_num])

    gr.Markdown("## Sample Inputs:")
    gr.Examples(
        example_list,
        [question_text, response_text, target_text],
        # cache_examples=True,
    )

demo.launch(debug=True)
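# ----------------------------------------------------------------------------
# Hypothetical sketch (not part of the original source): `do_submit` above
# references a `do_inference` that this file never defines. Judging from the
# imports (Mistral, BitsAndBytesConfig, LoRA), the real app most likely
# classifies the concatenated question/response text with a fine-tuned,
# quantized transformer. The checkpoint id, the 7-class label count, and the
# max sequence length below are assumptions, not the author's values. To wire
# it in, move this definition above `do_submit` and return
# `do_inference(full_text)` there.
# ----------------------------------------------------------------------------
from transformers import AutoModelForSequenceClassification

_clf_tokenizer = None
_clf_model = None


def do_inference(full_text: str) -> str:
    """Predict which LLM (as a class-index string) produced `full_text`."""
    global _clf_tokenizer, _clf_model
    if _clf_model is None:
        # Lazy-load so the UI can start even before weights are available.
        model_dir = "h2oai/predict-the-llm-classifier"  # hypothetical checkpoint id
        _clf_tokenizer = AutoTokenizer.from_pretrained(model_dir)
        _clf_model = AutoModelForSequenceClassification.from_pretrained(
            model_dir,
            num_labels=7,  # assumed number of candidate LLMs
            quantization_config=BitsAndBytesConfig(load_in_4bit=True),
            device_map="auto",
        )
        _clf_model.eval()
    inputs = _clf_tokenizer(
        full_text, truncation=True, max_length=1024, return_tensors="pt"
    ).to(_clf_model.device)
    with torch.no_grad():
        logits = _clf_model(**inputs).logits
    # Return the highest-scoring class index as a string for the "LLM #" textbox.
    return str(int(logits.argmax(dim=-1).item()))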