santy171710 committed
Commit c49562c · verified · 1 parent: febb77e

second commit

without updating the models

Files changed (6):
  1. .dockerignore +14 -0
  2. .gitattributes +1 -35
  3. .gitignore +2 -0
  4. Dockerfile +18 -0
  5. app.py +130 -0
  6. requirements.txt +6 -0
.dockerignore ADDED
@@ -0,0 +1,14 @@
+ venv/
+ __pycache__/
+ *.pyc
+ *.pyo
+ *.pyd
+ *.db
+ *.sqlite3
+ *.log
+ *.env
+ .DS_Store
+ .env.*
+ .idea/
+ .vscode/
+ models/__pycache__/
.gitattributes CHANGED
@@ -1,35 +1 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.keras filter=lfs diff=lfs merge=lfs -text
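This change replaces the repository's default LFS rules with a single rule for the Keras model files. A rule like the one kept above is typically generated with:

git lfs track "*.keras"

which appends the filter=lfs diff=lfs merge=lfs -text attributes for that pattern, so the .keras weights under models/ are stored in Git LFS rather than in the regular Git history.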
.gitignore ADDED
@@ -0,0 +1,2 @@
+ venv
+ __pycache__
Dockerfile ADDED
@@ -0,0 +1,18 @@
+ # Use Python 3.10 as the base image
+ FROM python:3.10
+
+ # Set the working directory
+ WORKDIR /app
+
+ # Copy project files into the container
+ COPY . .
+
+ # Upgrade pip and install dependencies
+ RUN pip install --upgrade pip
+ RUN pip install -r requirements.txt
+
+ # Expose port 7860 (the port Hugging Face Spaces expects)
+ EXPOSE 7860
+
+ # Start the FastAPI server with uvicorn
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
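To try the image locally, something along these lines should work (the tag xray-denoiser-api is a placeholder):

docker build -t xray-denoiser-api .
docker run -p 7860:7860 xray-denoiser-api

The API is then reachable at http://localhost:7860. Note that the COPY . . step expects the models/ directory, with the .keras files pulled from LFS, to be present in the build context.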
app.py ADDED
@@ -0,0 +1,130 @@
+ import io
+ import numpy as np
+ import tensorflow as tf
+ from PIL import Image
+ from fastapi import FastAPI, File, UploadFile, HTTPException
+ from fastapi.middleware.cors import CORSMiddleware
+ from fastapi.responses import StreamingResponse
+
+ # --- 1. Initialize FastAPI App ---
+ app = FastAPI(
+     title="X-Ray Denoising API",
+     description="An API to classify noise and denoise X-ray images.",
+     version="1.0.0",
+ )
+
+ # --- 2. Set up CORS ---
+ origins = [
+     "http://localhost:5173",
+     "http://localhost:3000",
+     "https://santy171710--classifier.hf.space",
+ ]
+
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=origins,
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ # --- 3. Load AI Models ---
+ def load_all_models():
+     """Loads the classifier and all denoising models."""
+     print("Loading AI models...")
+     try:
+         classifier_model = tf.keras.models.load_model('models/xray_noise_classifier_resnet50v2.keras')
+         denoiser_models = {
+             'gaussian': tf.keras.models.load_model('models/gaussian_denoiser_final_model.keras'),
+             'poisson': tf.keras.models.load_model('models/poisson_denoising.keras'),
+             'salt_pepper': tf.keras.models.load_model('models/salt_pepper_denoiser.keras'),
+             'speckle': tf.keras.models.load_model('models/speckle_denoising_final_model.keras'),
+         }
+         print("✅ Models loaded successfully!")
+         return classifier_model, denoiser_models
+     except Exception as e:
+         print(f"❌ Error loading models: {e}")
+         return None, None
+
+ CLASSIFIER, DENOISERS = load_all_models()
+ NOISE_CLASSES = ['gaussian', 'poisson', 'salt_pepper', 'speckle']
+
+ # --- 4. Define Helper Functions ---
+ def preprocess_image(image_bytes: bytes):
+     """Converts image bytes to a NumPy array for the classifier."""
+     try:
+         img = Image.open(io.BytesIO(image_bytes))
+
+         # --- THIS IS THE CRUCIAL FIX ---
+         # 1. Convert to RGB so the array has 3 color channels.
+         img = img.convert('RGB')
+         # 2. Resize to 224x224, the exact input size the ResNet50V2 classifier expects.
+         img = img.resize((224, 224))
+
+         img_array = np.array(img)
+         # Add the batch dimension; the channel dimension is already 3.
+         img_array = img_array[np.newaxis, ...]
+         return img_array / 255.0
+     except Exception as e:
+         raise HTTPException(status_code=400, detail=f"Invalid image file. Could not preprocess. Error: {e}")
+
+ def postprocess_output(denoised_array: np.ndarray):
+     """Converts a model output array back to an in-memory PNG image."""
+     # Squeeze the array to remove the batch (and channel) dimensions
+     processed_array = np.squeeze(denoised_array)
+     # Denormalize from 0-1 to 0-255 and convert to an integer type
+     processed_array = (processed_array * 255).astype(np.uint8)
+     image = Image.fromarray(processed_array)
+
+     img_io = io.BytesIO()
+     image.save(img_io, 'PNG')
+     img_io.seek(0)
+     return img_io
+
+ # --- 5. Create the API Endpoint ---
+ @app.post("/api/denoise", response_class=StreamingResponse)
+ async def denoise_image(image: UploadFile = File(...)):
+     """
+     Receives an X-ray image, classifies the noise, applies the matching
+     denoiser model, and returns the cleaned image.
+     """
+     if CLASSIFIER is None or DENOISERS is None:
+         raise HTTPException(status_code=503, detail="Models are not available on the server.")
+
+     image_bytes = await image.read()
+
+     try:
+         # Preprocess for the classifier
+         classifier_input = preprocess_image(image_bytes)
+
+         # Run the classifier
+         prediction = CLASSIFIER.predict(classifier_input)
+         noise_type_index = np.argmax(prediction)
+         noise_type = NOISE_CLASSES[noise_type_index]
+         print(f"Detected noise type: {noise_type}")
+
+         # --- IMPORTANT ---
+         # The image must be re-processed for the denoiser models, since they
+         # expect a different input size and format than the classifier.
+         # Assuming the denoisers expect grayscale 256x256 input here.
+         img_for_denoiser = Image.open(io.BytesIO(image_bytes)).convert('L').resize((256, 256))
+         denoiser_input = np.array(img_for_denoiser)[np.newaxis, ..., np.newaxis] / 255.0
+
+         # Select and run the matching denoiser
+         denoiser_model = DENOISERS[noise_type]
+         denoised_array = denoiser_model.predict(denoiser_input)
+
+         output_image_buffer = postprocess_output(denoised_array)
+
+         return StreamingResponse(output_image_buffer, media_type="image/png")
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         print(f"An unexpected error occurred during processing: {e}")
+         raise HTTPException(status_code=500, detail=f"An internal error occurred: {e}")
+
+ # --- 6. Add a root endpoint for a basic health check ---
+ @app.get("/")
+ def read_root():
+     return {"status": "ok", "message": "Welcome to the X-Ray Denoising API!"}
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ fastapi
+ uvicorn
+ python-multipart
+ tensorflow==2.19.0
+ numpy
+ Pillow