from fastapi import FastAPI
from pydantic import BaseModel
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import uvicorn


# Request body schema: the prompt to complete.
class CodeRequest(BaseModel):
    prompt: str


app = FastAPI()

# Load the fine-tuned model and tokenizer once at startup.
model = GPT2LMHeadModel.from_pretrained('./codegen_model')
tokenizer = GPT2Tokenizer.from_pretrained('./codegen_model')


@app.post("/generate-code/")
def generate_code(request: CodeRequest):
    # Encode the prompt and generate one continuation of up to 150 tokens.
    inputs = tokenizer.encode(request.prompt, return_tensors='pt')
    outputs = model.generate(inputs, max_length=150, num_return_sequences=1)
    generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return {"generated_code": generated_code}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
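A minimal client sketch for exercising the endpoint, assuming the service above is running locally on port 8000; the prompt string is only an illustration.

import requests

# Send a prompt to the /generate-code/ endpoint and print the model's completion.
response = requests.post(
    "http://localhost:8000/generate-code/",
    json={"prompt": "def fibonacci(n):"},  # hypothetical example prompt
)
print(response.json()["generated_code"])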