| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | from fastapi import FastAPI, HTTPException, Request, Depends, Query |
| | from fastapi.responses import JSONResponse, HTMLResponse |
| | from fastapi.middleware.cors import CORSMiddleware |
| | from fastapi.staticfiles import StaticFiles |
| | import pandas as pd |
| | import joblib |
| | import numpy as np |
| | import os |
| | import time |
| | import pickle |
| | from datetime import datetime |
| | from sklearn.ensemble import RandomForestRegressor |
| | from pydantic import BaseModel, ValidationError, Field, field_validator, model_validator |
| | from typing import Any, Dict, List, Optional, Union |
| | from scipy import stats |
| | import json |
| |
|
| | |
| | |
| | |
| |
|
# Application object; the description is rendered in the OpenAPI docs.
# Fix: the university name was mojibake ("IaΘi") — restored to "Iași".
app = FastAPI(
    title="UCS Prediction API with Uncertainty Quantification",
    description="""
**Advanced API for predicting Unconfined Compressive Strength (UCS) of cement-stabilized soils**

This application implements the uncertainty quantification system developed in the research
"Prediction of Unconfined Compressive Strength in Cement-Treated Soil: A Machine Learning Approach".

**Main features:**
- Accurate UCS predictions using optimized Random Forest
- Complete uncertainty quantification with calibrated confidence intervals
- Sensitivity analysis for parameter optimization
- Interpretability through feature importance analysis

**Developed by:** Research Team - Gheorghe Asachi Technical University of Iași
""",
    version="2.0.0",
    contact={
        "name": "UCS Development Team",
        "email": "iancu-bogdan.teodoru@academic.tuiasi.ro",
    }
)
| |
|
| | |
# Cross-origin access is restricted to the project's public site.
# Credentials (cookies/auth headers) are allowed, but only the verbs the
# API actually serves (GET/POST plus CORS preflight OPTIONS) are exposed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=[
        "http://www.bi4e-at.tuiasi.ro",
        "https://www.bi4e-at.tuiasi.ro"
    ],
    allow_credentials=True,
    allow_methods=["GET", "POST", "OPTIONS"],
    allow_headers=["*"],
)
| |
|
| | |
| | |
| | |
| |
|
| | |
# Locations of the serialized model artifacts produced at training time.
MODELS_DIR = "./models_for_deployment"
PRIMARY_MODEL_PATH = os.path.join(MODELS_DIR, "rf_primary_model.joblib")
UNCERTAINTY_MODEL_PATH = os.path.join(MODELS_DIR, "rf_uncertainty_model.joblib")
METADATA_PATH = os.path.join(MODELS_DIR, "system_metadata.pkl")

# Fallback feature order, used only when neither the fitted model nor the
# metadata file records the training-time feature names.
DEFAULT_FEATURE_ORDER = ['cement_percent', 'curing_period', 'compaction_rate']

# Global system state, populated once by load_uncertainty_system() at import.
primary_model = None        # stage 1: central UCS prediction model
uncertainty_model = None    # stage 2: error-magnitude estimation model
system_metadata = None      # optional training metadata (dict)
FEATURE_ORDER = None        # resolved feature order (numpy array of names)
| |
|
def load_uncertainty_system():
    """
    Load and validate the entire uncertainty system.

    Orchestrates loading of the primary model, the uncertainty model and the
    system metadata, resolves the feature order, and runs a cross-model
    compatibility check.  Designed to be robust: any failure is reported
    with a traceback and signalled via the return value.

    Returns:
        bool: True when every component loaded and validated, False otherwise.
    """
    global primary_model, uncertainty_model, system_metadata, FEATURE_ORDER

    # NOTE: the original status messages were mojibake-corrupted emoji;
    # they are replaced with plain-ASCII tags.
    print("Loading uncertainty system...")
    start_time = time.time()

    try:
        # Stage-1 model: produces the central UCS prediction.
        if os.path.exists(PRIMARY_MODEL_PATH):
            primary_model = joblib.load(PRIMARY_MODEL_PATH)
            print(f"[OK] Primary model loaded: {type(primary_model).__name__}")
        else:
            raise FileNotFoundError(f"Primary model not found at: {PRIMARY_MODEL_PATH}")

        # Stage-2 model: estimates the magnitude of the prediction error.
        if os.path.exists(UNCERTAINTY_MODEL_PATH):
            uncertainty_model = joblib.load(UNCERTAINTY_MODEL_PATH)
            print(f"[OK] Uncertainty model loaded: {type(uncertainty_model).__name__}")
        else:
            raise FileNotFoundError(f"Uncertainty model not found at: {UNCERTAINTY_MODEL_PATH}")

        # Metadata is optional; fall back to the default feature order.
        if os.path.exists(METADATA_PATH):
            with open(METADATA_PATH, 'rb') as f:
                system_metadata = pickle.load(f)
            print(f"[OK] System metadata loaded: {len(system_metadata)} keys")
        else:
            print("[WARN] System metadata not found, using default values")
            system_metadata = {"feature_names": DEFAULT_FEATURE_ORDER}

        # Resolve feature order: prefer what the fitted model itself recorded,
        # then the metadata, then the hard-coded default.
        if hasattr(primary_model, 'feature_names_in_'):
            FEATURE_ORDER = primary_model.feature_names_in_
        elif system_metadata and 'feature_names' in system_metadata:
            FEATURE_ORDER = np.array(system_metadata['feature_names'])
        else:
            FEATURE_ORDER = np.array(DEFAULT_FEATURE_ORDER)

        # End-to-end smoke test of the two-stage pipeline.
        validation_result = validate_models_compatibility()
        if not validation_result:
            raise ValueError("Models are not compatible with each other")

        load_time = time.time() - start_time
        print(f"Uncertainty system loaded successfully in {load_time:.2f} seconds!")
        print(f"Features: {FEATURE_ORDER.tolist()}")

        return True

    except Exception as e:
        print(f"[ERROR] Error loading system: {str(e)}")
        import traceback
        print(traceback.format_exc())
        return False
| |
|
def validate_models_compatibility():
    """
    Validate that the two models are compatible and work together.

    Runs a complete functional test on a representative input and checks
    that both stages return positive scalar values.

    Returns:
        bool: True when the pipeline works end-to-end, False otherwise
        (failures are reported via the return value, never raised).
    """
    try:
        # Representative point inside the experimental range:
        # [cement %, curing days, compaction rate].
        test_input = np.array([[5.0, 14.0, 1.0]])

        primary_pred = primary_model.predict(test_input)[0]

        # The uncertainty model consumes the original features augmented
        # with the central prediction.
        uncertainty_input = np.column_stack([test_input, [[primary_pred]]])
        uncertainty_pred = uncertainty_model.predict(uncertainty_input)[0]

        # Explicit checks instead of `assert`, so validation still runs
        # under `python -O` (asserts are stripped with optimization on).
        if not isinstance(primary_pred, (int, float, np.number)):
            raise TypeError("Primary prediction is not numeric")
        if not isinstance(uncertainty_pred, (int, float, np.number)):
            raise TypeError("Uncertainty prediction is not numeric")
        if primary_pred <= 0 or uncertainty_pred <= 0:
            raise ValueError("Predictions must be strictly positive")

        print(f"[OK] Compatibility test: UCS={primary_pred:.1f} kPa, sigma={uncertainty_pred:.1f} kPa")
        return True

    except Exception as e:
        print(f"[ERROR] Compatibility test failed: {str(e)}")
        return False
| |
|
| | |
| | system_loaded = load_uncertainty_system() |
| |
|
| | |
| | |
| | |
| |
|
class SoilInput(BaseModel):
    """
    Model for soil input data.

    Defines and validates the input parameters, ensuring values are within
    the validated experimental ranges.

    Fix: the cement field was misspelled `cement_perecent`, which broke the
    mapping against FEATURE_ORDER ('cement_percent') in /predict and the
    `base_params["cement_percent"]` lookups in the sensitivity endpoint.
    """
    cement_percent: float = Field(
        ...,
        description="Cement percentage in mixture",
        ge=0, le=15,
        example=5.0
    )
    curing_period: float = Field(
        ...,
        description="Curing period in days",
        ge=0, le=90,
        example=28.0
    )
    compaction_rate: float = Field(
        ...,
        description="Compaction rate in mm/min",
        ge=0.5, le=2.0,
        example=1.0
    )

    @model_validator(mode="after")
    def validate_cement_curing_relationship(self):
        """
        Validate the relationship between cement content and curing period.

        For untreated soil (0% cement), the curing period is forced to 0
        because there is no cement hydration process; treated soil must
        cure for at least one day.
        """
        if self.cement_percent == 0:
            self.curing_period = 0
        elif self.cement_percent > 0 and self.curing_period < 1:
            raise ValueError("For cement-treated soil, curing period must be >= 1 day")
        return self

    class Config:
        json_schema_extra = {
            "example": {
                "cement_percent": 5.0,
                "curing_period": 28.0,
                "compaction_rate": 1.0
            }
        }
| |
|
class ConfidenceInterval(BaseModel):
    """Model for a confidence interval.

    Bounds are in kPa; intervals are constructed symmetrically around the
    central prediction, so width == upper - lower.
    """
    lower: float = Field(..., description="Lower bound of the interval")
    upper: float = Field(..., description="Upper bound of the interval")
    width: float = Field(..., description="Width of the interval")
| |
|
class UncertaintyPredictionResponse(BaseModel):
    """
    Complete response with uncertainty quantification.

    This extended structure provides the engineer with a complete picture
    of the prediction, including not only the estimated value but also
    confidence in that estimate through calibrated intervals.
    """
    success: bool = Field(..., description="Request processing status")

    # Central estimate.
    central_prediction: float = Field(..., description="Most probable UCS prediction")
    units: str = Field(default="kPa", description="Units of measurement")

    # Uncertainty, absolute (1-sigma, kPa) and relative (% of the prediction).
    uncertainty_estimate: float = Field(..., description="Absolute uncertainty estimate (1-sigma)")
    relative_uncertainty: float = Field(..., description="Relative uncertainty as percentage")

    # Keyed by level label ("68%", "80%", "90%", "95%").
    confidence_intervals: Dict[str, ConfidenceInterval] = Field(
        ...,
        description="Confidence intervals for multiple probability levels"
    )

    # Human-readable guidance derived from the numbers above.
    interpretation: Dict[str, str] = Field(..., description="Interpretation guide for results")

    # Request echo and diagnostics.
    input_parameters: Dict[str, float] = Field(..., description="Input parameters used")
    prediction_time_ms: Optional[float] = Field(None, description="Processing time in milliseconds")
    model_info: Optional[Dict[str, Any]] = Field(None, description="Information about models used")
| |
|
class SensitivityAnalysisRequest(BaseModel):
    """Request for sensitivity analysis.

    Fix: the pattern previously accepted the misspelling `cement_perecent`,
    which the analysis handler then looked up as `cement_percent` (KeyError).
    """
    base_parameters: SoilInput
    parameter_to_vary: str = Field(..., pattern="^(cement_percent|curing_period|compaction_rate)$")
    variation_range: float = Field(default=10.0, ge=1.0, le=50.0, description="Variation range in percentage")
    num_points: int = Field(default=11, ge=5, le=21, description="Number of points for analysis")
| |
|
| | |
| | |
| | |
| |
|
def predict_with_uncertainty(input_data: np.ndarray,
                             confidence_levels: Optional[List[float]] = None) -> Dict[str, Any]:
    """
    Perform a complete prediction with uncertainty quantification.

    Implements the two-stage algorithm developed in the research:
    1. The primary model generates the central UCS prediction.
    2. The uncertainty model estimates the magnitude of the probable error
       from the original features augmented with the central prediction.
    3. Confidence intervals are constructed assuming a normal error
       distribution.

    Args:
        input_data: Array of shape (1, n_features) with
            [cement %, curing days, compaction rate].
        confidence_levels: Confidence levels for which to build intervals.
            Defaults to [0.68, 0.80, 0.90, 0.95]; a None sentinel replaces
            the previous mutable list default argument.

    Returns:
        Dict with central prediction, uncertainty estimate, relative
        uncertainty (%) and confidence intervals keyed by level label.
    """
    if confidence_levels is None:
        confidence_levels = [0.68, 0.80, 0.90, 0.95]

    # Stage 1: central prediction.
    central_prediction = primary_model.predict(input_data)[0]

    # Stage 2: feature augmentation — original features + central prediction.
    uncertainty_input = np.column_stack([input_data, [[central_prediction]]])
    uncertainty_estimate = uncertainty_model.predict(uncertainty_input)[0]

    # Stage 3: symmetric intervals under the normal-error assumption.
    confidence_intervals = {}
    for conf_level in confidence_levels:
        # Two-sided z-score for the requested coverage level.
        z_score = stats.norm.ppf((1 + conf_level) / 2)
        margin = z_score * uncertainty_estimate

        confidence_intervals[f'{conf_level:.0%}'] = ConfidenceInterval(
            lower=float(central_prediction - margin),
            upper=float(central_prediction + margin),
            width=float(2 * margin)
        )

    # Relative uncertainty, guarded against a zero central prediction.
    relative_uncertainty = (uncertainty_estimate / central_prediction) * 100 if central_prediction != 0 else 0

    return {
        'central_prediction': float(central_prediction),
        'uncertainty_estimate': float(uncertainty_estimate),
        'relative_uncertainty': float(relative_uncertainty),
        'confidence_intervals': confidence_intervals
    }
| |
|
def generate_interpretation_guide(central_prediction: float, uncertainty_estimate: float,
                                  confidence_intervals: Dict[str, "ConfidenceInterval"]) -> Dict[str, str]:
    """
    Generate a personalized interpretation guide for prediction results.

    Translates statistical results into practical language for engineers,
    providing the necessary context for informed decision making.

    Args:
        central_prediction: Central UCS estimate in kPa.
        uncertainty_estimate: 1-sigma uncertainty in kPa.
        confidence_intervals: Intervals keyed by level label; only '95%'
            (objects exposing .lower/.upper) is used here.

    Returns:
        Dict of short interpretation strings keyed by topic.
    """
    interval_95 = confidence_intervals.get('95%')

    # Guard against a zero central prediction (previously an unguarded
    # division): treat it as infinite relative uncertainty so the guide
    # falls into the most conservative band.
    if central_prediction != 0:
        relative_unc = (uncertainty_estimate / central_prediction) * 100
    else:
        relative_unc = float('inf')

    # Map relative uncertainty to a qualitative confidence band.
    if relative_unc <= 10:
        confidence_level = "very high"
        reliability_desc = "The prediction is very reliable for design decision making."
    elif relative_unc <= 20:
        confidence_level = "high"
        reliability_desc = "The prediction is reliable, we recommend validation through limited testing."
    elif relative_unc <= 30:
        confidence_level = "moderate"
        reliability_desc = "The prediction provides a useful estimate, but additional testing is recommended."
    else:
        confidence_level = "limited"
        reliability_desc = "The prediction is indicative, extensive testing is recommended for validation."

    interpretation = {
        "central_prediction": f"The most probable UCS value is {central_prediction:.0f} kPa, based on the input parameters.",

        # "±" restored from mojibake ("Β±") in the original literal.
        "uncertainty": f"The estimated uncertainty is ±{uncertainty_estimate:.0f} kPa ({relative_unc:.1f}%), "
                       f"indicating {confidence_level} confidence in the prediction.",

        "confidence_95": f"We have 95% confidence that the actual UCS value is between "
                         f"{interval_95.lower:.0f} and {interval_95.upper:.0f} kPa." if interval_95 else "",

        "reliability": reliability_desc,

        "practical_guidance": f"For applications with UCS requirements > {central_prediction + uncertainty_estimate:.0f} kPa, "
                              f"consider increasing cement content or extending the curing period."
    }

    return interpretation
| |
|
async def validate_models_loaded():
    """FastAPI dependency: refuse to serve until the model system is ready."""
    models_ready = (
        system_loaded
        and primary_model is not None
        and uncertainty_model is not None
    )
    if models_ready:
        return True
    raise HTTPException(
        status_code=503,
        detail="Model system is not loaded correctly. Contact administrator."
    )
| |
|
| | |
| | |
| | |
| |
|
@app.get("/", response_class=HTMLResponse, summary="Main page")
async def root():
    """
    Return the main landing page with API information.

    Fix: the served HTML contained mojibake (corrupted emoji and "IaΘi");
    the markup is restored to clean text with proper diacritics.
    """
    return """
    <!DOCTYPE html>
    <html>
    <head>
        <title>UCS Prediction API</title>
        <style>
            body { font-family: Arial, sans-serif; margin: 40px; }
            .header { color: #2c3e50; }
            .endpoint { background: #f8f9fa; padding: 15px; margin: 10px 0; border-left: 4px solid #007bff; }
        </style>
    </head>
    <body>
        <h1 class="header">UCS Prediction API with Uncertainty Quantification</h1>
        <p>Advanced API for predicting unconfined compressive strength of cement-stabilized soils.</p>

        <h2>Available endpoints:</h2>
        <div class="endpoint">
            <strong>POST /predict</strong> - UCS prediction with uncertainty quantification
        </div>
        <div class="endpoint">
            <strong>POST /sensitivity-analysis</strong> - Parameter sensitivity analysis
        </div>
        <div class="endpoint">
            <strong>GET /status</strong> - System status
        </div>
        <div class="endpoint">
            <strong>GET /model-info</strong> - Detailed model information
        </div>

        <h2>Documentation:</h2>
        <p><a href="/docs">Swagger UI - Interactive documentation</a></p>
        <p><a href="/redoc">ReDoc - Alternative documentation</a></p>

        <footer style="margin-top: 40px; color: #666;">
            <p>Developed by the research team - Gheorghe Asachi Technical University of Iași</p>
        </footer>
    </body>
    </html>
    """
| |
|
@app.post("/predict", response_model=UncertaintyPredictionResponse,
          summary="UCS Prediction with Uncertainty Quantification")
async def predict_ucs_with_uncertainty(
    soil_data: SoilInput,
    include_model_info: bool = Query(False, description="Include detailed model information"),
    _: bool = Depends(validate_models_loaded)
):
    """
    **Performs UCS prediction with complete uncertainty quantification.**

    This endpoint implements the advanced uncertainty system developed in our research,
    providing not only the central prediction but also calibrated confidence intervals at multiple levels.

    **Input parameters:**
    - **cement_percent**: Cement content (0-15%)
    - **curing_period**: Curing period (0-90 days)
    - **compaction_rate**: Compaction rate (0.5-2.0 mm/min)

    **Results include:**
    - Central UCS prediction in kPa
    - Absolute and relative uncertainty estimation
    - Confidence intervals at 68%, 80%, 90% and 95%
    - Personalized interpretation guide for results

    **Typical usage:**
    ```json
    {
        "cement_percent": 7.5,
        "curing_period": 28,
        "compaction_rate": 1.0
    }
    ```
    """
    try:
        start_time = time.time()

        # Pydantic v2: model_dump() replaces the deprecated dict().
        input_data = soil_data.model_dump()
        input_df = pd.DataFrame([input_data])

        # Reorder the columns to match the order the models were trained with.
        prediction_df = pd.DataFrame()
        for feature in FEATURE_ORDER:
            if feature in input_df.columns:
                prediction_df[feature] = input_df[feature]
            else:
                raise ValueError(f"Feature '{feature}' missing from input data")

        input_array = prediction_df.values

        # Two-stage prediction: central value + calibrated intervals.
        prediction_result = predict_with_uncertainty(input_array)

        # Human-readable guidance derived from the numbers.
        interpretation = generate_interpretation_guide(
            prediction_result['central_prediction'],
            prediction_result['uncertainty_estimate'],
            prediction_result['confidence_intervals']
        )

        # Optional diagnostics, only on explicit request.
        model_info = None
        if include_model_info:
            model_info = {
                "primary_model": type(primary_model).__name__,
                "uncertainty_model": type(uncertainty_model).__name__,
                "feature_order": FEATURE_ORDER.tolist(),
                "system_metadata": system_metadata if system_metadata else "Not available"
            }

        processing_time = (time.time() - start_time) * 1000

        return UncertaintyPredictionResponse(
            success=True,
            central_prediction=prediction_result['central_prediction'],
            units="kPa",
            uncertainty_estimate=prediction_result['uncertainty_estimate'],
            relative_uncertainty=prediction_result['relative_uncertainty'],
            confidence_intervals=prediction_result['confidence_intervals'],
            interpretation=interpretation,
            input_parameters=input_data,
            prediction_time_ms=processing_time,
            model_info=model_info
        )

    except ValueError as ve:
        raise HTTPException(status_code=400, detail=f"Validation error: {str(ve)}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Processing error: {str(e)}")
| |
|
@app.post("/sensitivity-analysis", summary="Parameter Sensitivity Analysis")
async def perform_sensitivity_analysis(
    request: SensitivityAnalysisRequest,
    _: bool = Depends(validate_models_loaded)
):
    """
    **Performs sensitivity analysis for a specific parameter.**

    This analysis shows how variation of an input parameter affects
    both the central prediction and associated uncertainty, providing valuable
    insights for mix design optimization.
    """
    try:
        # Pydantic v2: model_dump() replaces the deprecated dict().
        base_params = request.base_parameters.model_dump()
        param_to_vary = request.parameter_to_vary
        variation_range = request.variation_range / 100
        num_points = request.num_points

        base_value = base_params[param_to_vary]

        # Symmetric variation window around the base value...
        min_variation = base_value * (1 - variation_range)
        max_variation = base_value * (1 + variation_range)

        # ...clamped to the experimentally validated ranges.
        if param_to_vary == "cement_percent":
            min_variation = max(0, min_variation)
            max_variation = min(15, max_variation)
        elif param_to_vary == "curing_period":
            # Treated soil must cure >= 1 day; untreated soil may stay at 0.
            min_variation = max(0 if base_params["cement_percent"] == 0 else 1, min_variation)
            max_variation = min(90, max_variation)
        elif param_to_vary == "compaction_rate":
            min_variation = max(0.5, min_variation)
            max_variation = min(2.0, max_variation)

        variation_values = np.linspace(min_variation, max_variation, num_points)

        results = []
        for value in variation_values:
            modified_params = base_params.copy()
            modified_params[param_to_vary] = float(value)

            # Physical consistency: untreated soil cannot cure.
            if modified_params["cement_percent"] == 0:
                modified_params["curing_period"] = 0

            # Map the parameters onto the training-time feature order.
            input_df = pd.DataFrame([modified_params])
            prediction_df = pd.DataFrame()
            for feature in FEATURE_ORDER:
                prediction_df[feature] = input_df[feature]

            prediction_result = predict_with_uncertainty(prediction_df.values)

            results.append({
                param_to_vary: float(value),
                "central_prediction": prediction_result['central_prediction'],
                "uncertainty_estimate": prediction_result['uncertainty_estimate'],
                "relative_uncertainty": prediction_result['relative_uncertainty'],
                "confidence_95_lower": prediction_result['confidence_intervals']['95%'].lower,
                "confidence_95_upper": prediction_result['confidence_intervals']['95%'].upper
            })

        predictions = [r["central_prediction"] for r in results]
        uncertainties = [r["uncertainty_estimate"] for r in results]

        # Baseline for the relative change: the prediction at the middle of
        # the sweep (closest to the base value).  The previous code looked up
        # base_params["central_prediction"], a key that never exists in the
        # SoilInput dump, so this fallback was always taken anyway; a zero
        # guard is added to avoid division by zero.
        baseline_prediction = predictions[num_points // 2]
        prediction_span = max(predictions) - min(predictions)
        relative_change = (prediction_span / baseline_prediction * 100) if baseline_prediction != 0 else 0.0

        sensitivity_stats = {
            "parameter_range": {
                "min": float(min_variation),
                "max": float(max_variation),
                "base_value": float(base_value)
            },
            "prediction_sensitivity": {
                "min_prediction": float(min(predictions)),
                "max_prediction": float(max(predictions)),
                "range": float(prediction_span),
                "relative_change": float(relative_change)
            },
            "uncertainty_sensitivity": {
                "min_uncertainty": float(min(uncertainties)),
                "max_uncertainty": float(max(uncertainties)),
                "range": float(max(uncertainties) - min(uncertainties))
            }
        }

        return {
            "success": True,
            "parameter_analyzed": param_to_vary,
            "base_parameters": base_params,
            "sensitivity_data": results,
            "sensitivity_statistics": sensitivity_stats,
            "interpretation": {
                "parameter_impact": f"A {variation_range*100:.1f}% variation in {param_to_vary} "
                                    f"produces a change of {sensitivity_stats['prediction_sensitivity']['range']:.1f} kPa in UCS",
                "recommendation": "The parameter with the greatest impact should be carefully controlled in the field"
                if sensitivity_stats['prediction_sensitivity']['relative_change'] > 10
                else "The parameter has moderate impact, small variations are acceptable"
            }
        }

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error in sensitivity analysis: {str(e)}")
| |
|
@app.get("/status", summary="System Status")
async def get_system_status():
    """
    **Returns complete system status for uncertainty quantification.**

    Useful for monitoring application health and diagnosing problems.
    """
    # Per-component availability flags.
    models_status = {
        "primary_model": primary_model is not None,
        "uncertainty_model": uncertainty_model is not None,
        "metadata_available": system_metadata is not None,
    }

    feature_list = FEATURE_ORDER.tolist() if FEATURE_ORDER is not None else []

    status_info = {
        "api_status": "running",
        "timestamp": datetime.now().isoformat(),
        "system_loaded": system_loaded,
        "models_status": models_status,
        "feature_configuration": {
            "feature_order": feature_list,
            "num_features": len(feature_list),
        },
    }

    # When the system claims to be loaded, run a live end-to-end smoke test.
    if system_loaded:
        try:
            passed = validate_models_compatibility()
            status_info["functionality_test"] = "passed" if passed else "failed"
        except Exception as e:
            status_info["functionality_test"] = f"error: {str(e)}"

    return status_info
| |
|
@app.get("/model-info", summary="Model Information")
async def get_model_information(_: bool = Depends(validate_models_loaded)):
    """
    **Returns detailed information about the models used.**

    Includes model parameters, historical performance and applicability limits.
    """
    try:
        # Per-stage model descriptions.
        models_section = {
            "primary_model": {
                "type": type(primary_model).__name__,
                "parameters": primary_model.get_params(),
                "purpose": "Central UCS prediction"
            },
            "uncertainty_model": {
                "type": type(uncertainty_model).__name__,
                "parameters": uncertainty_model.get_params(),
                "purpose": "Prediction error magnitude estimation"
            }
        }

        # Experimentally validated input ranges.
        ranges_section = {
            "cement_percent": {"min": 0, "max": 15, "units": "%", "note": "Based on experimental data"},
            "curing_period": {"min": 0, "max": 90, "units": "days", "note": "0 only valid for 0% cement"},
            "compaction_rate": {"min": 0.5, "max": 2.0, "units": "mm/min", "note": "Within experimental range"}
        }

        model_info = {
            "system_type": "Two-stage Random Forest Uncertainty Quantification",
            "models": models_section,
            "features": {
                "input_features": FEATURE_ORDER.tolist(),
                "feature_engineering": "Feature augmentation for uncertainty model (original features + central prediction)"
            },
            "valid_ranges": ranges_section,
            "confidence_levels": ["68%", "80%", "90%", "95%"],
            "target_variable": {
                "name": "UCS",
                "description": "Unconfined Compressive Strength",
                "units": "kPa",
                "typical_range": "150-5500 kPa based on experimental data"
            }
        }

        # Training metadata is attached only when available.
        if system_metadata:
            model_info["training_metadata"] = {
                "training_samples": system_metadata.get("n_training_samples", "Unknown"),
                "training_timestamp": system_metadata.get("training_timestamp", "Unknown"),
                "model_version": "2.0.0"
            }

        return model_info

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error obtaining information: {str(e)}")
| |
|
| | |
| | |
| | |
| |
|
@app.exception_handler(ValidationError)
async def validation_exception_handler(request: Request, exc: ValidationError):
    """
    Custom handler for Pydantic validation errors.

    Provides user-friendly error messages.  Fix: the regex-mismatch check
    only matched Pydantic v1 phrasing ("string does not match regex");
    Pydantic v2 emits "String should match pattern", which is now also
    recognized.
    """
    friendly_errors = []
    for error in exc.errors():
        field = " -> ".join(str(loc) for loc in error.get('loc', []))
        message = error.get('msg', '')

        # Rewrite the library phrasing into plain guidance.
        if "greater than or equal" in message:
            message = f"Value for {field} is too small"
        elif "less than or equal" in message:
            message = f"Value for {field} is too large"
        elif ("string does not match regex" in message
              or "should match pattern" in message.lower()):
            message = f"Value for {field} is not valid"

        friendly_errors.append({
            "field": field,
            "message": message,
            "error_type": error.get('type', '')
        })

    return JSONResponse(
        status_code=422,
        content={
            "success": False,
            "error": "Input data validation error",
            "details": friendly_errors,
            "help": "Check that all values are within specified ranges and try again"
        }
    )
| |
|
@app.exception_handler(Exception)
async def general_exception_handler(request: Request, exc: Exception):
    """Catch-all handler for unexpected exceptions; hides internals from clients."""
    # Timestamp doubles as a crude correlation id for log lookup.
    payload = {
        "success": False,
        "error": "Internal server error",
        "message": "An unexpected error occurred. Contact administrator if problem persists.",
        "request_id": str(time.time()),
    }
    return JSONResponse(status_code=500, content=payload)
| |
|
| | |
| | |
| | |
| |
|
@app.on_event("startup")
async def startup_event():
    """
    Event executed at application startup.

    Reports whether the model system loaded correctly so operators can spot
    deployment problems immediately.  Fix: the original status literals were
    mojibake-corrupted emoji and one f-string was split mid-line; replaced
    with plain-ASCII messages.
    """
    print("Starting UCS Prediction API v2.0...")

    if system_loaded:
        print("[OK] Uncertainty system loaded and functional")
        print(f"Features configured: {FEATURE_ORDER.tolist()}")
    else:
        print("[ERROR] WARNING: System was not loaded correctly!")
        print("   Check that model files are present in the models_for_deployment/ directory")

    print("API available for requests")
| |
|
if __name__ == "__main__":
    # Local development entry point.
    # Fix: uvicorn ignores `reload=True` when given an app *instance* (it
    # requires an import string such as "module:app"), so the flag is dropped.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)