import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

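# TinyLlama 1.1B: small enough to run in float32 on CPU (~4.4 GB of weights)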
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

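# Load tokenizer and model once at startup; keep the app usable if loading fails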
try:
    print(f"Loading {model_name}...")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    tokenizer.pad_token = tokenizer.eos_token  # TinyLlama ships without a pad token

    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float32,   # float16 support on CPU is limited, so stay in full precision
        device_map="cpu",
        low_cpu_mem_usage=True       # stream weights in to reduce peak RAM during load
    )
    print("Model loaded successfully")

except Exception as e:
    print(f"Failed to load model: {e}")
    model, tokenizer = None, None

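# Single-turn generation: no chat history is kept between calls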
def generate_response(message):
    """Generate a chat completion for a single user message."""
    if not message.strip():
        return "Please enter a question."

    # Degrade gracefully so the UI can still be exercised without the model
    if model is None or tokenizer is None:
        return f"Model not loaded. Testing UI with: {message}"

    try:
        # TinyLlama-1.1B-Chat follows the Zephyr chat format; each turn ends with </s>
        prompt = f"<|user|>\n{message}</s>\n<|assistant|>\n"

        # Truncate long inputs; TinyLlama's context window is 2048 tokens
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=384)

        with torch.no_grad():
            outputs = model.generate(
                inputs.input_ids,
                attention_mask=inputs.attention_mask,
                max_new_tokens=600,
                temperature=0.8,   # mild randomness; lower for more deterministic answers
                do_sample=True,
                top_p=0.9,         # nucleus sampling over the top 90% of probability mass
                pad_token_id=tokenizer.pad_token_id,
                eos_token_id=tokenizer.eos_token_id
            )

        # Decode only the newly generated tokens, not the echoed prompt
        response = tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
        return response.strip()

    except Exception as e:
        return f"Error: {str(e)[:100]}"

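# Single-turn text box UI; gr.Interface also exposes the function as an HTTP API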
interface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(label="Input", placeholder="Enter programming question...", lines=3),
    outputs=gr.Textbox(label="Output", lines=10),
    title="LiveCoder API",
    description="LLM programming assistant",
    allow_flagging="never"
)

# Convenience printout of where the deployed Space's endpoint should live
USERNAME = "sarekuwa"
SPACE_NAME = "livecoder"
print(f"API Endpoint: https://{USERNAME}-{SPACE_NAME}.hf.space/api/predict")

# Serialize requests: a 1.1B model on CPU can only handle one generation at a time
interface.queue(default_concurrency_limit=1)

interface.launch(
    server_name="0.0.0.0",  # bind all interfaces so the Space's proxy can reach the app
    server_port=7860,       # the port Hugging Face Spaces expects
    share=False,
    debug=True
)
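
# Example client call (a sketch, assuming the Space is public and the gradio_client
# package is installed; gr.Interface registers the endpoint as "/predict"):
#
#   from gradio_client import Client
#   client = Client("sarekuwa/livecoder")
#   print(client.predict("Write a Python function that reverses a string."))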