Spaces:
Runtime error
Mariame Ouamer committed
Commit • 9b0dc73 • 1 Parent(s): 1d6db7a
Add application file
- Dockerfile +13 -0
- app.py +61 -0
- requirements.txt +4 -0
Dockerfile
ADDED
@@ -0,0 +1,13 @@
+FROM python:3.11
+
+RUN useradd -m -u 1000 user
+
+WORKDIR /app
+
+COPY --chown=user ./requirements.txt requirements.txt
+
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+COPY --chown=user . /app
+
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
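The same image can be exercised locally before pushing to the Space. A minimal sketch, assuming Docker is installed and an HF_TOKEN is at hand (the spec-api image tag is an arbitrary placeholder, not part of the commit):

# Build the image from the repository root
docker build -t spec-api .

# Run it, exposing the port uvicorn listens on and passing the Hugging Face token
docker run -p 7860:7860 -e HF_TOKEN=<your-token> spec-api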
app.py
ADDED
@@ -0,0 +1,61 @@
+"""
+from fastapi import FastAPI
+from pydantic import BaseModel
+
+app = FastAPI()
+
+class SpecificationInput(BaseModel):
+    domaine_activite: str
+    objectif_projet: str
+    description_besoin: str
+    exigences_techniques: str
+    installations_solution: str
+
+@app.post("/specification-generator")
+def generate_specification(input_data: SpecificationInput):
+    return {
+        "Domaine d'activité du client": input_data.domaine_activite,
+        "Objectif du projet": input_data.objectif_projet,
+        "Description du besoin": input_data.description_besoin,
+        "Exigences Techniques": input_data.exigences_techniques,
+        "Installations de la solution à prévoir": input_data.installations_solution
+    }
+
+"""
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+import os
+
+app = FastAPI()
+
+# Hugging Face authentication token
+huggingface_token = os.getenv('HF_TOKEN')
+
+
+
+# Load the model and tokenizer once at startup
+model_name = "meta-llama/Meta-Llama-3-8B"
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=huggingface_token)
+model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=huggingface_token)
+
+# Define the request model
+class TextGenerationRequest(BaseModel):
+    prompt: str
+    max_length: int = 50
+
+@app.post("/specification_generator/")
+def generate_text(request: TextGenerationRequest):
+    try:
+        inputs = tokenizer.encode(request.prompt, return_tensors="pt")
+        outputs = model.generate(inputs, max_length=request.max_length)
+        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        return {"generated_text": generated_text}
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+# Endpoint to check the API status
+@app.get("/")
+def read_root():
+    return {"message": "Specification Generator API is running"}
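With the container running, the endpoints can be smoke-tested over plain HTTP. A hedged example (it assumes the server is reachable at localhost:7860 as in the Dockerfile; the prompt text is arbitrary):

# Status check against the root endpoint
curl http://localhost:7860/

# Request a completion; max_length falls back to 50 if omitted
curl -X POST http://localhost:7860/specification_generator/ \
  -H "Content-Type: application/json" \
  -d '{"prompt": "Cahier des charges pour une application web", "max_length": 100}'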
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+fastapi
+uvicorn[standard]
+transformers
+torch