import gradio as gr
from transformers import AutoTokenizer, GemmaForCausalLM
import torch
model = GemmaForCausalLM.from_pretrained("google/gemma-2b-it", device_map="auto", torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
# Example prompt: "What is your favorite condiment?"
def generate(prompt):
    # Tokenize the prompt and move it to the device the model was loaded on.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Generate up to 100 new tokens.
    outputs = model.generate(**inputs, max_new_tokens=100)
    # Decode the output sequence, dropping special tokens such as <bos>/<eos>.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
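# Possible variant (sketch only, not wired into the demo): gemma-2b-it is
# instruction-tuned, so formatting the input with the tokenizer's chat template
# (tokenizer.apply_chat_template) typically gives better answers than raw text.
# generate_chat below is illustrative; the name is not part of this app.
#
# def generate_chat(prompt):
#     messages = [{"role": "user", "content": prompt}]
#     chat_ids = tokenizer.apply_chat_template(
#         messages, add_generation_prompt=True, return_tensors="pt"
#     ).to(model.device)
#     outputs = model.generate(chat_ids, max_new_tokens=100)
#     # Decode only the newly generated tokens, not the echoed prompt.
#     return tokenizer.decode(outputs[0][chat_ids.shape[-1]:], skip_special_tokens=True)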
demo = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(lines=5, label="Input Text"),
    outputs=gr.Textbox(label="Generated Text"),
)
demo.launch(share=True)
# Alternative: load the larger gemma-7b model in 4-bit with bitsandbytes
# (requires: pip install bitsandbytes accelerate).
# from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
# quantization_config = BitsAndBytesConfig(load_in_4bit=True)
# tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
# model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", quantization_config=quantization_config)
# input_text = "Write me a poem about Machine Learning."
# input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
# outputs = model.generate(**input_ids)
# print(tokenizer.decode(outputs[0]))
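# Optional sketch: to stream tokens into the Gradio textbox instead of waiting
# for the full completion, a generator function with TextIteratorStreamer could
# be used. generate_stream below is illustrative and assumes the gemma-2b-it
# model/tokenizer loaded at the top of this file.
#
# from threading import Thread
# from transformers import TextIteratorStreamer
#
# def generate_stream(prompt):
#     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
#     # Run generation in a background thread so tokens can be consumed as they arrive.
#     Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=100)).start()
#     text = ""
#     for chunk in streamer:
#         text += chunk
#         yield text  # gr.Interface renders each partial string as it is yielded
#
# # Pass fn=generate_stream to gr.Interface instead of fn=generate to enable streaming.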