from transformers import pipeline

# Model ID from Hugging Face Hub
MODEL_ID = "ISTA-DASLab/gemma-2b-AQLM-2Bit-2x8-hf"

# Create a text generation pipeline (the model is downloaded and cached on first use)
generator = pipeline("text-generation", model=MODEL_ID)
def generate_text(prompt):
    """Generates text using the loaded model.

    Args:
        prompt: The user input to guide the generation.

    Returns:
        The generated text.
    """
    generated_text = generator(prompt, max_length=50, num_return_sequences=1)[0]['generated_text']
    return generated_text
# Space UI (using Streamlit for demonstration)
import streamlit as st

st.title("Text Generation with ISTA-DASLab/gemma-2b-AQLM-2Bit-2x8-hf")
prompt = st.text_input("Enter a prompt (e.g., My name is Teven and I am...)")
if st.button("Generate"):
    generated_text = generate_text(prompt)
    st.write(generated_text)
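For reference, a minimal sketch of loading the same checkpoint without the pipeline helper. It reuses MODEL_ID from the listing above, assumes the aqlm package (required for AQLM-quantized weights) and accelerate are installed, and is not part of the Space app itself.

# Alternative loading path (assumption: pip install aqlm[gpu,cpu] accelerate)
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype="auto",   # keep the dtype stored in the checkpoint
    device_map="auto",    # let accelerate place layers on the available devices
)

inputs = tokenizer("My name is Teven and I am", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_length=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))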