File size: 998 Bytes
babb96a
e71b389
babb96a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
# NOTE(review): dependencies (transformers, accelerate, aqlm) must be listed in
# the Space's requirements.txt — a `pip install` shell command inside this .py
# file is a SyntaxError and made the script unrunnable.
from transformers import pipeline

# Model ID on the Hugging Face Hub (AQLM 2-bit quantized Gemma-2B).
MODEL_ID = "ISTA-DASLab/gemma-2b-AQLM-2Bit-2x8-hf"

# Create a text-generation pipeline. pipeline() accepts a Hub model ID
# directly and downloads/caches the full model repo itself, so the removed
# huggingface_hub `cached_download(hf_hub_url(...))` call is unnecessary —
# and was incorrect anyway: it fetched a single file, not a loadable model.
generator = pipeline("text-generation", model=MODEL_ID)

def generate_text(prompt):
  """Generate a continuation of *prompt* with the module-level pipeline.

  Args:
      prompt: The user input to guide the generation.

  Returns:
      The generated text (a single string; the pipeline is asked for
      exactly one sequence of at most 50 tokens).
  """
  # The pipeline returns a list of result dicts; we request one sequence
  # and unwrap its 'generated_text' field.
  outputs = generator(prompt, max_length=50, num_return_sequences=1)
  return outputs[0]['generated_text']

# Space UI (using Streamlit for demonstration)
import streamlit as st

st.title("Text Generation with ISTA-DASLab/gemma-2b-AQLM-2Bit-2x8-hf")
user_prompt = st.text_input("Enter a prompt (e.g., My name is Teven and I am...)")

# Streamlit re-runs the script on every interaction; generation happens
# only on the run triggered by the button click.
if st.button("Generate"):
  st.write(generate_text(user_prompt))