import gradio as gr
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TextIteratorStreamer,
)
import os
# Bug fix: original line was "import threading import Thread", a SyntaxError.
from threading import Thread
# NOTE(review): import order kept as-is — on HF Spaces, `spaces` (ZeroGPU)
# is conventionally imported before any CUDA use; confirm before regrouping.
import spaces
import time

# Hugging Face access token for gated/private model downloads.
# Direct indexing (not .get) makes a missing HF_TOKEN fail fast at startup.
token = os.environ["HF_TOKEN"]

# TODO(review): model_name is empty — fill in the Hub repo id
# (e.g. "org/model-name") before this script can load anything.
model_name = ""

# Load the causal LM. The original call passed no arguments at all, while
# `model_name` and `token` were defined but never used — they are clearly
# the intended arguments, so they are wired in here.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    token=token,
)