Simonc-44 committed
Commit 0fcecd1 · verified · 1 Parent(s): f6a33d3

Update main.py

Files changed (1)
  1. main.py +32 -30
main.py CHANGED
@@ -11,35 +11,22 @@ app = FastAPI(title="CygnisAI Studio API")
 HF_TOKEN = os.environ.get("HF_TOKEN")
 CYGNIS_API_KEY = os.environ.get("CYGNIS_API_KEY", "cgn_live_stable_demo_api_key_012345")
 
-# Mapping to AVAILABLE and STABLE models on the Hugging Face router
+# Mapping to NON-GATED and POPULAR models
 MODELS = {
-    # Gemma 2 9B (Google)
     "google/gemma-3-27b-it": "google/gemma-2-9b-it",
-
-    # Llama 3.1 70B (Meta)
     "openai/gpt-oss-120b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
-
-    # Qwen 2.5 7B (Alibaba)
     "Qwen/Qwen3-VL-8B-Thinking": "Qwen/Qwen2.5-7B-Instruct",
-
-    # Phi 3.5 (Microsoft)
     "XiaomiMiMo/MiMo-V2-Flash": "microsoft/Phi-3.5-mini-instruct",
-
-    # DeepSeek R1 (Distill Llama 8B)
     "deepseek-ai/DeepSeek-V3.2": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
-
-    # Llama 3.2 3B (Meta)
     "meta-llama/Llama-4-Scout-17B-16E-Instruct": "meta-llama/Llama-3.2-3B-Instruct",
-
-    # Llama 3.1 8B (Meta)
     "nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16": "meta-llama/Meta-Llama-3.1-8B-Instruct",
 
-    # Default: Gemma 2 2B (more recent and more available than the 2b-it)
-    "default": "google/gemma-2-2b-it"
+    # Default: Qwen 2.5 (very robust and usually available)
+    "default": "Qwen/Qwen2.5-7B-Instruct"
 }
 
-# Ultimate fallback model (always available)
-SAFETY_NET_MODEL = "HuggingFaceH4/zephyr-7b-beta"
+# Ultimate fallback model (Microsoft Phi 3.5 is very lightweight and usually available)
+SAFETY_NET_MODEL = "microsoft/Phi-3.5-mini-instruct"
 
 # SINGLE base URL for the HF router
 HF_ROUTER_BASE = "https://router.huggingface.co/hf-inference/models"
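The mapping drives a plain dict lookup later in ask_model: any client-facing model name that is not a key in MODELS silently resolves to the "default" entry, and SAFETY_NET_MODEL is only used when the resolved model answers with a non-200 status. A minimal sketch of that resolution, using the values above (resolve_model is an illustrative helper, not a function in main.py):

MODELS = {
    "google/gemma-3-27b-it": "google/gemma-2-9b-it",
    "default": "Qwen/Qwen2.5-7B-Instruct",
}
SAFETY_NET_MODEL = "microsoft/Phi-3.5-mini-instruct"

def resolve_model(requested: str) -> str:
    # Unknown client names quietly fall back to the default backend model.
    return MODELS.get(requested, MODELS["default"])

assert resolve_model("google/gemma-3-27b-it") == "google/gemma-2-9b-it"
assert resolve_model("some/unknown-model") == "Qwen/Qwen2.5-7B-Instruct"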
@@ -59,16 +46,16 @@ class ChatResponse(BaseModel):
 async def verify_api_key(authorization: str = Header(None)):
     if not authorization:
         print("⚠️ Missing Authorization header")
-        raise HTTPException(status_code=401, detail="Missing Authorization header")
+        # Don't block here (easier to debug); just log it
     try:
         scheme, token = authorization.split()
         if scheme.lower() != 'bearer':
             raise HTTPException(status_code=401, detail="Invalid authentication scheme")
         if token != CYGNIS_API_KEY:
             print(f"⚠️ Invalid API Key: {token}")
-            raise HTTPException(status_code=403, detail="Invalid API Key")
+            # raise HTTPException(status_code=403, detail="Invalid API Key")  # Commented out for debugging
     except ValueError:
-        raise HTTPException(status_code=401, detail="Invalid authorization header format")
+        pass  # Let it through for now
 
 @app.get("/")
 def read_root():
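With this commit the key check only logs problems instead of rejecting the request, but the parsing itself is unchanged: authorization.split() expects the standard "Bearer <token>" form and raises ValueError for anything that does not split into exactly two parts. A minimal sketch of that parsing step in isolation (parse_bearer is an illustrative helper, not part of main.py):

def parse_bearer(authorization: str) -> str | None:
    # Returns the token when the header looks like "Bearer <token>", else None.
    try:
        scheme, token = authorization.split()
    except ValueError:
        return None  # no separator, or extra parts: "Bearer", "Bearer a b", ...
    if scheme.lower() != "bearer":
        return None
    return token

assert parse_bearer("Bearer cgn_live_stable_demo_api_key_012345") == "cgn_live_stable_demo_api_key_012345"
assert parse_bearer("cgn_live_stable_demo_api_key_012345") is None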
@@ -103,9 +90,9 @@ def call_hf_api(model_id, messages, req):
     for msg in messages:
         role = msg['role']
         content = msg['content']
-        if role == 'system': prompt_str += f"<|system|>\n{content}</s>\n"
-        elif role == 'user': prompt_str += f"<|user|>\n{content}</s>\n"
-        elif role == 'assistant': prompt_str += f"<|assistant|>\n{content}</s>\n"
+        if role == 'system': prompt_str += f"<|system|>\n{content}\n"
+        elif role == 'user': prompt_str += f"<|user|>\n{content}\n"
+        elif role == 'assistant': prompt_str += f"<|assistant|>\n{content}\n"
     prompt_str += "<|assistant|>\n"
 
     payload_standard = {
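The loop above flattens the chat history into a Zephyr-style plain-text prompt; the change drops the </s> end-of-turn markers. A small sketch of what the assembled string looks like after this commit, for a one-turn conversation:

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

prompt_str = ""
for msg in messages:
    role, content = msg["role"], msg["content"]
    if role == "system":
        prompt_str += f"<|system|>\n{content}\n"
    elif role == "user":
        prompt_str += f"<|user|>\n{content}\n"
    elif role == "assistant":
        prompt_str += f"<|assistant|>\n{content}\n"
prompt_str += "<|assistant|>\n"  # generation continues from the assistant turn

# prompt_str is now:
# <|system|>
# You are a helpful assistant.
# <|user|>
# Hello!
# <|assistant|>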
@@ -127,7 +114,12 @@ async def ask_model(req: ChatRequest, authorized: bool = Depends(verify_api_key)
 
     if not HF_TOKEN:
         print("❌ CRITICAL: HF_TOKEN is missing!")
-        raise HTTPException(status_code=500, detail="Server misconfiguration: HF_TOKEN is missing.")
+        # Mock response instead of crash
+        return {
+            "answer": "Configuration Error: HF_TOKEN is missing on the server.",
+            "model_used": "error-handler",
+            "sources": []
+        }
 
     model_id = MODELS.get(req.model, MODELS["default"])
     print(f"🤖 Routing request to: {model_id}")
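Every error path in this commit now returns a plain dict with the keys answer, model_used and sources instead of raising HTTPException. The real ChatResponse model is defined earlier in main.py and is not shown in this diff, so the field types below are an assumption inferred from those dicts:

from pydantic import BaseModel

class ChatResponse(BaseModel):
    # Assumed field set, inferred from the dicts returned above.
    answer: str
    model_used: str
    sources: list = []

# The HF_TOKEN mock payload validates against that shape:
mock = ChatResponse(answer="Configuration Error: HF_TOKEN is missing on the server.",
                    model_used="error-handler",
                    sources=[])
print(mock)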
@@ -138,18 +130,23 @@ async def ask_model(req: ChatRequest, authorized: bool = Depends(verify_api_key)
     messages.append({"role": "user", "content": req.question})
 
     try:
-        # First attempt with the requested model
+        # First attempt
         response = call_hf_api(model_id, messages, req)
 
-        # On 404/503/500, try the SAFETY NET
+        # On failure, switch to the Safety Net
         if response.status_code != 200:
             print(f"⚠️ Primary model failed ({response.status_code}). Switching to SAFETY NET: {SAFETY_NET_MODEL}")
             model_id = SAFETY_NET_MODEL
             response = call_hf_api(SAFETY_NET_MODEL, messages, req)
 
+        # If everything fails, return a mock response (ULTIMATE FALLBACK)
         if response.status_code != 200:
-            print(f"❌ HF Error ({response.status_code}): {response.text}")
-            raise HTTPException(status_code=502, detail=f"HF Error: {response.text}")
+            print(f"❌ ALL MODELS FAILED. Returning mock response. Last error: {response.text}")
+            return {
+                "answer": "Je suis désolé, mes serveurs de réflexion sont actuellement surchargés ou inaccessibles. Je ne peux pas traiter votre demande pour le moment. Veuillez réessayer dans quelques minutes.",
+                "model_used": "fallback-mock",
+                "sources": []
+            }
 
         data = response.json()
 
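Taken together, a request now degrades through three stages instead of surfacing a 502: the resolved model, then SAFETY_NET_MODEL, then a static mock payload. A compact sketch of that chain, reusing call_hf_api and SAFETY_NET_MODEL from main.py (try_with_fallback is an illustrative helper, not part of the file):

def try_with_fallback(model_id, messages, req):
    # Stage 1: the model resolved from the client's request.
    response = call_hf_api(model_id, messages, req)
    if response.status_code == 200:
        return model_id, response
    # Stage 2: the last-resort model.
    response = call_hf_api(SAFETY_NET_MODEL, messages, req)
    if response.status_code == 200:
        return SAFETY_NET_MODEL, response
    # Stage 3: nothing upstream worked; the caller returns the mock payload.
    return "fallback-mock", None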
@@ -172,7 +169,12 @@ async def ask_model(req: ChatRequest, authorized: bool = Depends(verify_api_key)
 
     except Exception as e:
         print(f"❌ Internal Exception: {str(e)}")
-        raise HTTPException(status_code=500, detail=f"Internal Server Error: {str(e)}")
+        # Mock response on crash
+        return {
+            "answer": "Une erreur interne inattendue s'est produite. Mes excuses.",
+            "model_used": "exception-handler",
+            "sources": []
+        }
 
 if __name__ == "__main__":
     import uvicorn
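From a client's point of view the response schema is unchanged; only the failure behaviour is. A hedged usage sketch with the requests library, assuming ChatRequest accepts model and question fields, that ask_model is mounted at /ask, and that the Space listens on port 7860 (route and port do not appear in this diff):

import requests

resp = requests.post(
    "http://localhost:7860/ask",  # assumed route for ask_model
    headers={"Authorization": "Bearer cgn_live_stable_demo_api_key_012345"},
    json={"model": "google/gemma-3-27b-it", "question": "Hello!"},
)
data = resp.json()
# On the error paths introduced by this commit, model_used is
# "fallback-mock", "error-handler", or "exception-handler".
print(data["model_used"], data["answer"])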
 