from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from fastapi import FastAPI


# Load the tokenizer and model once at startup so every request reuses them.
MODEL_ID = "MBZUAI/LaMini-Flan-T5-77M"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)

app = FastAPI()

@app.get("/")
def root():
    return {
        "message": "✅ LaMini-Flan-T5-77M Chatbot is running!",
        "usage": "Send GET /chat?query=your+question"
    }

@app.get("/chat")
def chat(query: str):
    """
    Example: GET /chat?query=What+is+Python%3F
    Returns JSON: {"answer": "...model’s reply..."}
    """

    # Tokenize the incoming question into PyTorch tensors.
    inputs = tokenizer(query, return_tensors="pt")

    # Generate a reply, capped at 100 new tokens.
    outputs = model.generate(**inputs, max_new_tokens=100)

    # Decode the generated token ids back into plain text.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return {"answer": response.strip()}