saifeddinemk committed
Commit: b397dc0
Parent(s): d088330

Fixed app v2

Files changed (1):
  1. app.py +44 -28
app.py CHANGED
@@ -1,43 +1,59 @@
-from fastapi import FastAPI, Form
+import nest_asyncio
+import uvicorn
+from fastapi import FastAPI, File, UploadFile, Form
 from llama_cpp import Llama
-from typing import List
+
 import json
 
+# Allow nested event loops
+nest_asyncio.apply()
+
 # Initialize FastAPI app
 app = FastAPI()
 
 # Load the Llama model
 llm = Llama.from_pretrained(
     repo_id="HuggingFaceTB/SmolLM2-360M-Instruct-GGUF",
-    filename="smollm2-360m-instruct-q8_0.gguf", # Replace with the actual path to your GGUF file
+    filename="smollm2-360m-instruct-q8_0.gguf" # Replace with the actual path to your GGUF file
 )
 
-# Endpoint to generate response from model based on user input
-@app.post("/ask/")
-async def ask_question(prompt: str = Form(...)):
-    # Format the prompt as a chat message
-    messages = [
-        {"role": "user", "content": prompt}
-    ]
-
-    # Generate a response using Llama
-    response = llm.create_chat_completion(messages=messages)
-    response_content = response["choices"][0]["message"]["content"]
-
-    return {"response": response_content}
-
-# Endpoint to test a simple query (optional)
-@app.get("/test/")
-async def test():
-    # Test the model with a simple question
-    messages = [{"role": "user", "content": "What is the capital of France?"}]
-    response = llm.create_chat_completion(messages=messages)
-    response_content = response["choices"][0]["message"]["content"]
-
-    return {"test_response": response_content}
-
-
-import uvicorn
-
-if __name__ == "__main__":
-    uvicorn.run("main:app", host="0.0.0.0", port=8000)
+# Endpoint to upload CV file and store CV text
+@app.post("/upload-cv/")
+async def upload_cv(file: UploadFile = File(...)):
+    content = await file.read()
+    cv_text = content.decode("utf-8")
+    return {"cv_text": cv_text}
+
+# Endpoint to compare job descriptions with the CV text
+@app.post("/compare/")
+async def compare_job_cv(job_descriptions: str = Form(...), cv_text: str = Form(...)):
+    # Split job descriptions by line
+    descriptions = job_descriptions.strip().split("\n")
+    results = []
+
+    for description in descriptions:
+        # Create chat messages to prompt Llama for each job description
+        messages = [
+            {"role": "user", "content": f"Compare the following job description with this resume. Job Description: {description}. Resume: {cv_text}. Give a match score and brief analysis."}
+        ]
+
+        # Generate response using Llama
+        response = llm.create_chat_completion(messages=messages)
+        response_content = response["choices"][0]["message"]["content"]
+
+        # Parse response content for a score and summary
+        try:
+            response_data = json.loads(response_content)
+            results.append(response_data)
+        except json.JSONDecodeError:
+            results.append({
+                "Job Description": description,
+                "Analysis": response_content  # Use raw response if JSON parsing fails
+            })
+
+    return {"results": results}
+
+# Expose the app using ngrok
+
+# Run the app directly in Colab
+uvicorn.run(app, host="0.0.0.0", port=8000)
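
For reference, a minimal sketch of how the two new endpoints could be exercised once the app is running. This is not part of the commit: the requests client, the localhost URL, and the sample_cv.txt file name are all assumptions.

import requests

BASE_URL = "http://localhost:8000"  # assumed from the uvicorn.run(...) call above

# Upload a plain-text CV; the endpoint returns the decoded text
with open("sample_cv.txt", "rb") as f:  # hypothetical input file
    cv_resp = requests.post(f"{BASE_URL}/upload-cv/", files={"file": f})
cv_text = cv_resp.json()["cv_text"]

# Compare newline-separated job descriptions against the CV text
jobs = "Senior Python developer with FastAPI experience\nData analyst with SQL and Excel skills"
cmp_resp = requests.post(
    f"{BASE_URL}/compare/",
    data={"job_descriptions": jobs, "cv_text": cv_text},
)
print(cmp_resp.json()["results"])

Note that the /compare/ prompt asks the model for "a match score and brief analysis" as free text rather than JSON, so json.loads(response_content) will usually raise JSONDecodeError and the raw analysis is returned under the "Analysis" key.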