saifeddinemk committed
Commit • b397dc0 • 1 Parent(s): d088330
Fixed app v2
app.py
CHANGED
@@ -1,43 +1,59 @@
-
+import nest_asyncio
+import uvicorn
+from fastapi import FastAPI, File, UploadFile, Form
 from llama_cpp import Llama
-
+
 import json

+# Allow nested event loops
+nest_asyncio.apply()
+
 # Initialize FastAPI app
 app = FastAPI()

 # Load the Llama model
 llm = Llama.from_pretrained(
     repo_id="HuggingFaceTB/SmolLM2-360M-Instruct-GGUF",
-    filename="smollm2-360m-instruct-q8_0.gguf"
+    filename="smollm2-360m-instruct-q8_0.gguf"  # Replace with the actual path to your GGUF file
 )

-# Endpoint to
-@app.post("/
-async def
-
-
-
-
-
-
-
-
+# Endpoint to upload CV file and store CV text
+@app.post("/upload-cv/")
+async def upload_cv(file: UploadFile = File(...)):
+    content = await file.read()
+    cv_text = content.decode("utf-8")
+    return {"cv_text": cv_text}
+
+# Endpoint to compare job descriptions with the CV text
+@app.post("/compare/")
+async def compare_job_cv(job_descriptions: str = Form(...), cv_text: str = Form(...)):
+    # Split job descriptions by line
+    descriptions = job_descriptions.strip().split("\n")
+    results = []

-
-
-
-
-
-
-
-
-
+    for description in descriptions:
+        # Create chat messages to prompt Llama for each job description
+        messages = [
+            {"role": "user", "content": f"Compare the following job description with this resume. Job Description: {description}. Resume: {cv_text}. Give a match score and brief analysis."}
+        ]
+
+        # Generate response using Llama
+        response = llm.create_chat_completion(messages=messages)
+        response_content = response["choices"][0]["message"]["content"]
+
+        # Parse response content for a score and summary
+        try:
+            response_data = json.loads(response_content)
+            results.append(response_data)
+        except json.JSONDecodeError:
+            results.append({
+                "Job Description": description,
+                "Analysis": response_content  # Use raw response if JSON parsing fails
+            })

-    return {"
+    return {"results": results}

-
-import uvicorn
+# Expose the app using ngrok

-
-
+# Run the app directly in Colab
+uvicorn.run(app, host="0.0.0.0", port=8000)
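A note on the parsing fallback in /compare/: the prompt never asks the model to answer in JSON, so json.loads(response_content) will usually raise and the raw-text branch will be taken. One way to make the parse succeed more often is sketched below; it is not part of this commit, and it assumes a llama-cpp-python version whose create_chat_completion accepts an OpenAI-style response_format argument.

# Hypothetical variant of the create_chat_completion call in /compare/.
# Assumption: a recent llama-cpp-python that supports response_format.
# The doubled braces are f-string escapes, not dict syntax.
response = llm.create_chat_completion(
    messages=[
        {
            "role": "user",
            "content": (
                f"Compare the following job description with this resume. "
                f"Job Description: {description}. Resume: {cv_text}. "
                f'Reply only with JSON: {{"score": 0-100, "analysis": "<brief text>"}}'
            ),
        }
    ],
    response_format={"type": "json_object"},
)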
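For completeness, a minimal client sketch for the two endpoints added in this commit. Assumptions: the app is reachable at http://localhost:8000 (the host/port from the uvicorn.run call above), and cv.txt is an illustrative plain-text resume file.

import requests

BASE_URL = "http://localhost:8000"  # assumption: app running locally on port 8000

# Upload a plain-text CV; the endpoint echoes back the decoded text
with open("cv.txt", "rb") as f:  # "cv.txt" is an illustrative file name
    upload = requests.post(f"{BASE_URL}/upload-cv/", files={"file": f})
cv_text = upload.json()["cv_text"]

# Compare newline-separated job descriptions against the CV
jobs = "Senior Python backend engineer\nData analyst with SQL experience"
compare = requests.post(
    f"{BASE_URL}/compare/",
    data={"job_descriptions": jobs, "cv_text": cv_text},
)
print(compare.json()["results"])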