YchKhan committed on
Commit
97d07f1
·
verified ·
1 Parent(s): 0112e1f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -86
app.py CHANGED
@@ -108,85 +108,6 @@ class ReqGroupingResponse(BaseModel):
108
 
109
 
110
 
111
-
112
- {
113
- "categories": [
114
- {
115
- "id": 1,
116
- "title": "Robustness & Resilience",
117
- "requirements": [
118
- {
119
- "context": "Subject to the operator’s policy and regulatory requirements, an AI service provided by the 6G network or UE shall be able to provide information regarding robustness scores.",
120
- "requirement": "Expose an overall robustness score to service consumers."
121
- },
122
- {
123
- "context": "The network can respond with a missings-resilience score for the used AI application.",
124
- "requirement": "Report a missings-resilience score that quantifies tolerance to missing or corrupted input data."
125
- }
126
- ]
127
- },
128
- {
129
- "id": 2,
130
- "title": "Environmental Sustainability",
131
- "requirements": [
132
- {
133
- "context": "What is the level of energy consumption per information request (per inference run of the AI).",
134
- "requirement": "Report energy consumption per 1 000 inference requests."
135
- },
136
- {
137
- "context": "What is the portion of renewable energy of the energy consumed by the AI service.",
138
- "requirement": "Report the share of renewable energy in the AI service’s power mix."
139
- },
140
- {
141
- "context": "The application sets a requirement for the energy consumption needed for inference.",
142
- "requirement": "Allow the consumer to specify a maximum energy-per-inference threshold that must be met."
143
- }
144
- ]
145
- },
146
- {
147
- "id": 3,
148
- "title": "Explainability & Transparency",
149
- "requirements": [
150
- {
151
- "context": "Local explanation: The aim is to explain individual outputs provided by an ML model.",
152
- "requirement": "Support local explanations for single predictions."
153
- },
154
- {
155
- "context": "Global explanation: The aim is to explain the whole ML model behaviour.",
156
- "requirement": "Support global explanations that describe overall model logic."
157
- },
158
- {
159
- "context": "Third-party applications have explanations of AI agent reasoning.",
160
- "requirement": "Provide on-demand reasoning for predictions to authorised consumers."
161
- }
162
- ]
163
- },
164
- {
165
- "id": 4,
166
- "title": "Service Discovery & Criteria Negotiation",
167
- "requirements": [
168
- {
169
- "context": "A subscriber density prediction service is offered via an exposure interface.",
170
- "requirement": "Ensure AI services are discoverable through the exposure interface."
171
- },
172
- {
173
- "context": "The application requests further profile information regarding robustness, sustainability and explainability aspects.",
174
- "requirement": "Expose a profile that includes robustness, sustainability and explainability metrics."
175
- },
176
- {
177
- "context": "A service consumer shall be able to provide service criteria regarding robustness, environmental sustainability, and explainability when requesting an AI service to the 6G system.",
178
- "requirement": "Accept consumer-supplied criteria for robustness, sustainability and explainability."
179
- },
180
- {
181
- "context": "In some cases the AI service could not be fulfilled, or could fall back to a non-AI mechanism if the criteria cannot be met.",
182
- "requirement": "Support rejection or graceful fallback when agreed criteria are not satisfied."
183
- }
184
- ]
185
- }
186
- ]
187
- }
188
-
189
-
190
  # ---- Solution Models ----
191
 
192
  class SolutionModel(BaseModel):
@@ -261,7 +182,12 @@ The solution must aim to maximize requirement satisfaction while respecting the
261
  Provide a clear and well-reasoned description of how your solution addresses each requirement.
262
  """
263
 
264
- # ---- Main Endpoint ----
 
 
 
 
 
265
 
266
  @app.post("/find_solutions", response_model=SolutionsResponse)
267
  async def find_solutions(requirements: ReqGroupingResponse):
@@ -318,9 +244,4 @@ async def find_solutions(requirements: ReqGroupingResponse):
318
  )
319
  solutions.append(error_solution)
320
 
321
- return SolutionsResponse(solutions=solutions)
322
-
323
-
324
- @app.get("/")
325
- def greet_json():
326
- return {"Status": "OK!"}
 
108
 
109
 
110
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
  # ---- Solution Models ----
112
 
113
  class SolutionModel(BaseModel):
 
182
  Provide a clear and well-reasoned description of how your solution addresses each requirement.
183
  """
184
 
185
+ # ---- Endpoints ----
186
+
187
+ @app.get("/")
188
+ def greet_json():
189
+ return {"Hey!": "SoA Finder is running!!"}
190
+
191
 
192
  @app.post("/find_solutions", response_model=SolutionsResponse)
193
  async def find_solutions(requirements: ReqGroupingResponse):
 
244
  )
245
  solutions.append(error_solution)
246
 
247
+ return SolutionsResponse(solutions=solutions)