renceabishek committed on
Commit
5aa5ba7
·
verified ·
1 Parent(s): cee8c26

API exposed

Browse files
Files changed (1) hide show
  1. app.py +14 -17
app.py CHANGED
@@ -1,24 +1,21 @@
 
 
1
  from transformers import pipeline
2
- import gradio as gr
3
 
4
- # Load the model pipeline
5
  qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-small")
6
-
7
- # Load your resume content
8
  with open("resume.txt", "r", encoding="utf-8") as f:
9
  resume_text = f.read()
10
 
11
- # Function to generate answers
12
- def answer_question(user_question):
13
- prompt = f"Resume:\n{resume_text}\n\nQuestion: {user_question}\nAnswer:"
14
- response = qa_pipeline(prompt, max_length=100)[0]["generated_text"]
15
- return response.strip()
16
 
17
- # Gradio interface
18
- gr.Interface(
19
- fn=answer_question,
20
- inputs=gr.Textbox(lines=2, placeholder="Ask a question about Rence's resume..."),
21
- outputs="text",
22
- title="Rence's Resume Q&A Bot",
23
- description="Ask questions based on Rence Abishek's resume. The bot will answer using the resume content."
24
- ).launch(share=True)
 
1
+ from fastapi import FastAPI, Request
2
+ from pydantic import BaseModel
3
  from transformers import pipeline
4
+ import uvicorn
5
 
6
# Question-answering model: small instruction-tuned T5, loaded once at startup.
qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-small")

# The resume is read a single time and reused verbatim in every prompt.
with open("resume.txt", encoding="utf-8") as resume_file:
    resume_text = resume_file.read()

# ASGI application served by the platform (e.g. uvicorn).
app = FastAPI()
13
+
14
class Question(BaseModel):
    """Request body for POST /predict: the user's free-text question."""

    query: str
16
 
17
@app.post("/predict")
def predict(question: Question):
    """Answer a question about the resume using the loaded text2text model.

    Declared as a plain ``def`` (not ``async def``) on purpose: the
    transformers pipeline call below is blocking and CPU-bound, and FastAPI
    runs sync endpoints in its threadpool, so one slow generation no longer
    stalls the event loop for all other requests.

    Returns a JSON object of the form ``{"answer": <generated text>}``.
    """
    # Prompt format: resume context first, then the question, ending with
    # "Answer:" so the instruction-tuned model completes with the answer.
    prompt = f"Resume:\n{resume_text}\n\nQuestion: {question.query}\nAnswer:"
    # The pipeline returns a list of candidates; take the single default one.
    result = qa_pipeline(prompt, max_length=100)[0]["generated_text"]
    return {"answer": result.strip()}