File size: 1,119 Bytes
eb27501
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import gradio as gr
import requests
import os

# Get the Hugging Face API token from environment variables
# Hosted Inference API endpoint for the fine-tuned code-generation model.
API_URL = "https://api-inference.huggingface.co/models/EmTpro01/codellama-Code-Generator"
# NOTE(review): if HF_API_TOKEN is unset this is None and the header below
# becomes "Bearer None" — the API will reject it with 401. Consider failing
# fast at startup instead; confirm deployment always sets the variable.
api_token = os.getenv("HF_API_TOKEN")
# Sent with every inference request (see generate_code).
headers = {"Authorization": f"Bearer {api_token}"}

# Function to generate code from a prompt using the Inference API
def generate_code(prompt):
    """Generate code for *prompt* via the Hugging Face Inference API.

    Sends the prompt to the hosted model at ``API_URL`` and returns the
    generated text. On any failure (network error, non-200 status, or an
    unexpected response shape) a human-readable ``"Error: ..."`` string is
    returned instead of raising, matching the original error convention so
    the Gradio UI always gets a string to display.

    Parameters
    ----------
    prompt : str
        The code prompt typed by the user.

    Returns
    -------
    str
        The generated code, or an error description.
    """
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_length": 150,
            "temperature": 0.7,
            "top_k": 50
        }
    }
    try:
        # timeout prevents the UI handler from hanging forever on a
        # stalled connection (requests has no default timeout).
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    except requests.RequestException as exc:
        return f"Error: request failed ({exc})"

    if response.status_code != 200:
        return f"Error: {response.status_code}, {response.text}"

    # The API normally returns [{"generated_text": ...}], but can also
    # return an error dict ({"error": ...}) or non-JSON while the model
    # is loading — guard the shape instead of crashing the handler.
    try:
        body = response.json()
    except ValueError:
        return f"Error: non-JSON response, {response.text}"

    if isinstance(body, list) and body and "generated_text" in body[0]:
        return body[0]["generated_text"]
    return f"Error: unexpected response format, {body}"

# Create the Gradio interface
# Wire the generator into a simple text-in / text-out Gradio UI.
_ui_config = {
    "fn": generate_code,
    "inputs": "text",
    "outputs": "text",
    "title": "Code Generator using Inference API",
    "description": "Enter a code prompt to generate Python code using the fine-tuned model.",
}
interface = gr.Interface(**_ui_config)

# Start the web app.
interface.launch()