Upload 7 files
Browse files- Dockerfile +34 -0
- app.py +264 -0
- inference_utils.py +170 -0
- requirements.txt +16 -0
- static/css/style.css +641 -0
- templates/index.html +160 -0
- templates/report.html +287 -0
Dockerfile
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use an official Python runtime as a parent image
FROM python:3.10-slim

# Set environment variables.
# key=value form: the legacy space-separated `ENV key value` syntax is
# deprecated by Docker and emits build warnings.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    HF_HOME=/tmp/huggingface

# Set the working directory in the container
WORKDIR /app

# Install system dependencies for OpenCV and other tools.
# --no-install-recommends keeps the image slim.
RUN apt-get update && apt-get install -y --no-install-recommends \
    libgl1-mesa-glx \
    libglib2.0-0 \
    && rm -rf /var/lib/apt/lists/*

# Copy the requirements file first so dependency installation is cached
# independently of application-code changes.
COPY requirements.txt .

# Install dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application code
COPY . .

# Create necessary directories and set permissions for HF Spaces
RUN mkdir -p uploads results encoder && chmod -R 777 uploads results encoder

# Expose the target port
EXPOSE 7860

# Run the application with Gunicorn
CMD ["gunicorn", "--bind", "0.0.0.0:7860", "--timeout", "600", "app:app"]
|
app.py
ADDED
|
@@ -0,0 +1,264 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import zipfile
|
| 3 |
+
import pandas as pd
|
| 4 |
+
from flask import Flask, request, redirect, url_for, send_from_directory, flash, render_template
|
| 5 |
+
from werkzeug.utils import secure_filename
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
from sklearn.metrics import classification_report, precision_recall_fscore_support
|
| 8 |
+
from inference_utils import DiamondInference
|
| 9 |
+
from dotenv import load_dotenv
|
| 10 |
+
|
| 11 |
+
# Load local environment variables from .env
|
| 12 |
+
load_dotenv()
|
| 13 |
+
|
| 14 |
+
app = Flask(__name__)
# Session-signing key. Prefer an environment override; the hard-coded
# fallback keeps local development working but must not be relied on in
# production (a known secret lets anyone forge session cookies).
app.secret_key = os.getenv("FLASK_SECRET_KEY", "supersecretkey")

# Hugging Face Hub Integration
HF_REPO_ID = os.getenv("HF_REPO_ID", "WebashalarForML/Diamcol")
HF_TOKEN = os.getenv("HF_TOKEN")

# Model Configuration
MODEL_ID = "322c4f4d"
MODEL_NAME = f"model_vit_robust_{MODEL_ID}.keras"
|
| 24 |
+
|
| 25 |
+
def download_model_from_hf():
    """Fetch the model and its encoder artifacts from the Hugging Face
    Hub, skipping anything already present on local disk."""
    from huggingface_hub import hf_hub_download

    print("[INFO] Checking model files from Hugging Face...")

    # Model file
    if not os.path.exists(MODEL_NAME):
        print(f"[INFO] Downloading {MODEL_NAME}...")
        hf_hub_download(repo_id=HF_REPO_ID, filename=MODEL_NAME, token=HF_TOKEN, local_dir=".")

    # Encoder artifacts (names must match what inference_utils.py loads)
    artifact_names = [
        f"{stem}_{MODEL_ID}.pkl"
        for stem in ("hyperparameters", "cat_encoders", "num_scaler", "target_encoder", "norm_stats")
    ]
    os.makedirs("encoder", exist_ok=True)
    for name in artifact_names:
        if not os.path.exists(os.path.join("encoder", name)):
            print(f"[INFO] Downloading {name}...")
            # Note: Assuming the structure on HF is encoder/filename
            hf_hub_download(repo_id=HF_REPO_ID, filename=f"encoder/{name}", token=HF_TOKEN, local_dir=".")
|
| 49 |
+
|
| 50 |
+
UPLOAD_FOLDER = 'uploads'
RESULTS_FOLDER = 'results'
EXTRACT_FOLDER = os.path.join(UPLOAD_FOLDER, 'extracted_images')

# makedirs(..., exist_ok=True) replaces the race-prone
# "if not exists: makedirs" check-then-create pattern.
for folder in (UPLOAD_FOLDER, RESULTS_FOLDER, EXTRACT_FOLDER):
    os.makedirs(folder, exist_ok=True)

app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 500 * 1024 * 1024  # 500MB max upload

# Global inference object (lazy loaded by get_inference_engine)
model_path = MODEL_NAME
encoder_dir = "encoder"
infer_engine = None
|
| 65 |
+
|
| 66 |
+
def get_inference_engine():
    """Return the process-wide DiamondInference instance, creating it on
    first use (lazy initialisation keeps application start-up fast)."""
    global infer_engine
    if infer_engine is not None:
        return infer_engine

    # Best-effort download for the Docker/HF Spaces environment; if it
    # fails we still try to load whatever files exist locally.
    try:
        download_model_from_hf()
    except Exception as e:
        print(f"[WARNING] Could not download from HF: {e}. Expecting local files.")

    infer_engine = DiamondInference(model_path, encoder_dir, MODEL_ID)
    return infer_engine
|
| 77 |
+
|
| 78 |
+
@app.route('/flush', methods=['POST'])
def flush_data():
    """Delete everything inside the uploads and results folders, then
    re-create the extraction folder and redirect back to the index page.

    The per-folder cleanup was duplicated verbatim for both folders; it
    is factored into one helper so the two paths cannot drift apart.
    """
    import shutil

    def _clear_folder(folder):
        # Remove every file, symlink and sub-directory inside *folder*,
        # leaving the folder itself in place. Failures are logged per
        # entry so one stuck file does not abort the whole flush.
        for filename in os.listdir(folder):
            file_path = os.path.join(folder, filename)
            try:
                if os.path.isfile(file_path) or os.path.islink(file_path):
                    os.unlink(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
            except Exception as e:
                print(f'Failed to delete {file_path}. Reason: {e}')

    try:
        _clear_folder(UPLOAD_FOLDER)
        # EXTRACT_FOLDER lives inside UPLOAD_FOLDER, so it may have just
        # been deleted — re-create it for the next upload.
        os.makedirs(EXTRACT_FOLDER, exist_ok=True)
        _clear_folder(RESULTS_FOLDER)
        flash('All data flushed successfully.')
    except Exception as e:
        flash(f'Error during flushing: {e}')

    return redirect(url_for('index'))
|
| 113 |
+
|
| 114 |
+
@app.route('/')
def index():
    """Render the landing/upload page."""
    return render_template('index.html')
|
| 117 |
+
|
| 118 |
+
@app.route('/upload', methods=['POST'])
def upload_files():
    """Handle a zip-of-images + Excel upload, run inference for every
    spreadsheet row, and render the HTML report.

    When the sheet contains a ground-truth 'FGrdCol' column, accuracy /
    precision / recall / F1 and a confusion matrix are computed as well.

    Bug fix: the Stone_Id fallback lookup called ``os.basename`` (the
    ``os`` module has no such attribute), which raised AttributeError the
    first time a row had to fall back to Stone_Id matching. It now uses
    ``os.path.basename``.
    """
    if 'zip_file' not in request.files or 'excel_file' not in request.files:
        flash('Both Zip and Excel files are required.')
        return redirect(request.url)

    zip_file = request.files['zip_file']
    excel_file = request.files['excel_file']

    if zip_file.filename == '' or excel_file.filename == '':
        flash('No selected file')
        return redirect(request.url)

    # Save and Extract Zip
    zip_path = os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(zip_file.filename))
    zip_file.save(zip_path)

    # NOTE(review): extractall() on an untrusted archive is exposed to
    # "zip slip" path traversal via crafted member names — consider
    # validating each member's resolved path before extraction.
    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
        zip_ref.extractall(EXTRACT_FOLDER)

    # Process Excel
    excel_path = os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(excel_file.filename))
    excel_file.save(excel_path)
    df = pd.read_excel(excel_path)

    # Inference Logic
    engine = get_inference_engine()

    # Pre-cache all image paths for faster searching
    all_extracted_files = []
    for root, dirs, files in os.walk(EXTRACT_FOLDER):
        for f in files:
            if f.lower().endswith(('.jpg', '.jpeg', '.png')):
                all_extracted_files.append(os.path.join(root, f))

    print(f"[INFO] Found {len(all_extracted_files)} images in extraction folder.")

    # Identifying ground truth for metrics
    y_true = []
    y_pred = []

    print(f"[INFO] Initializing Inference Pipeline for {len(df)} stones...")
    for index, row in tqdm(df.iterrows(), total=len(df), desc="Inference Progress"):
        # Excel numeric cells arrive as floats ("123.0"); strip the decimals.
        l_code = str(row.get('L_Code', '')).split('.')[0]
        sr_no = str(row.get('SrNo', '')).split('.')[0]
        stone_id = str(row.get('Stone_Id', ''))

        # Primary match: filename contains both L_Code and SrNo.
        img_path = None
        for full_path in all_extracted_files:
            fname = os.path.basename(full_path)
            if l_code in fname and sr_no in fname:
                img_path = full_path
                break

        # Fallback match on Stone_Id ('nan' is pandas' stringified NaN).
        if not img_path and stone_id != 'nan' and stone_id:
            for full_path in all_extracted_files:
                if stone_id in os.path.basename(full_path):  # was os.basename — AttributeError
                    img_path = full_path
                    break

        if img_path:
            prediction = engine.predict(row, img_path)
            # Store filename relative to EXTRACT_FOLDER for web serving
            web_path = os.path.relpath(img_path, start=EXTRACT_FOLDER)
            df.at[index, 'Predicted_FGrdCol'] = prediction
            df.at[index, 'Image_Path'] = web_path

            # If ground truth exists, collect it
            if 'FGrdCol' in row and pd.notna(row['FGrdCol']):
                y_true.append(str(row['FGrdCol']))
                y_pred.append(str(prediction))
        else:
            df.at[index, 'Predicted_FGrdCol'] = "Image Not Found"
            df.at[index, 'Image_Path'] = "N/A"

    # Calculate Metrics if ground truth is available
    metrics = None
    if y_true:
        report_dict = classification_report(y_true, y_pred, output_dict=True, zero_division=0)

        # Clean up the report for better display
        class_metrics = []
        labels = sorted(set(y_true) | set(y_pred))

        from sklearn.metrics import confusion_matrix
        cm = confusion_matrix(y_true, y_pred, labels=labels)

        for label, scores in report_dict.items():
            if label not in ['accuracy', 'macro avg', 'weighted avg']:
                class_metrics.append({
                    'label': label,
                    'precision': round(scores['precision'], 4),
                    'recall': round(scores['recall'], 4),
                    'f1': round(scores['f1-score'], 4),
                    'support': scores['support']
                })

        metrics = {
            'accuracy': round(report_dict['accuracy'], 4),
            'class_metrics': class_metrics,
            'weighted_avg': report_dict['weighted avg'],
            'macro_avg': report_dict['macro avg'],
            'precision': round(report_dict['weighted avg']['precision'], 4),
            'recall': round(report_dict['weighted avg']['recall'], 4),
            'f1': round(report_dict['weighted avg']['f1-score'], 4),
            'macro_f1': round(report_dict['macro avg']['f1-score'], 4),
            'macro_precision': round(report_dict['macro avg']['precision'], 4),
            'macro_recall': round(report_dict['macro avg']['recall'], 4),
            'confusion_matrix': {
                'labels': labels,
                'matrix': cm.tolist()
            }
        }

    # Model parameters (features used for prediction)
    model_features = ["StoneType", "Color", "Brown", "BlueUv", "GrdType", "Carat", "Result"]

    # Identify "out of box" features - only if they actually contain data
    potential_oob = ['FancyYellow', 'Type2A', 'YellowUv']
    out_of_box_cols = []
    for col in potential_oob:
        if col in df.columns:
            # Check if there is at least one non-null/non-empty value
            if df[col].dropna().astype(str).str.strip().replace(['nan', 'None', ''], pd.NA).notna().any():
                out_of_box_cols.append(col)

    output_filename = f"report_{secure_filename(excel_file.filename)}"
    output_path = os.path.join(RESULTS_FOLDER, output_filename)
    df.to_excel(output_path, index=False)

    return render_template('report.html',
                           report_data=df.to_dict(orient='records'),
                           report_file=output_filename,
                           out_of_box_cols=out_of_box_cols,
                           model_features=model_features,
                           metrics=metrics)
|
| 254 |
+
|
| 255 |
+
@app.route('/download/<filename>')
def download_file(filename):
    """Serve a generated report file from the results folder."""
    return send_from_directory(RESULTS_FOLDER, filename)
|
| 258 |
+
|
| 259 |
+
@app.route('/image/<path:filename>')
def serve_image(filename):
    """Serve an extracted stone image; *filename* is relative to
    EXTRACT_FOLDER (matching the 'Image_Path' stored in the report)."""
    return send_from_directory(EXTRACT_FOLDER, filename)
|
| 262 |
+
|
| 263 |
+
if __name__ == '__main__':
    # Local development entry point only — production runs via Gunicorn
    # (see Dockerfile CMD). NOTE(review): debug=True enables the Werkzeug
    # debugger and must never be exposed publicly.
    app.run(debug=True)
|
inference_utils.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import joblib
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pandas as pd
|
| 5 |
+
import cv2
|
| 6 |
+
import tensorflow as tf
|
| 7 |
+
from patchify import patchify
|
| 8 |
+
|
| 9 |
+
# 1. Define Custom Layers
|
| 10 |
+
@tf.keras.utils.register_keras_serializable()
class ClassToken(tf.keras.layers.Layer):
    """Learnable classification token broadcast across the batch
    (ViT-style [CLS] token).

    NOTE(review): the attribute name ``w`` and weight name ``cls_token``
    participate in Keras weight restoration — do not rename them without
    re-exporting the saved model.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def build(self, input_shape):
        # Token width follows the last axis of the incoming patch embeddings.
        self.hidden_dim = input_shape[-1]
        self.w = self.add_weight(
            name="cls_token",
            shape=(1, 1, self.hidden_dim),
            initializer="random_normal",
            trainable=True,
        )
    def call(self, inputs):
        # Tile the single learned token to (batch, 1, hidden_dim); only the
        # batch size of `inputs` is used, not its values.
        batch_size = tf.shape(inputs)[0]
        cls = tf.broadcast_to(self.w, [batch_size, 1, self.hidden_dim])
        return cls
|
| 26 |
+
|
| 27 |
+
@tf.keras.utils.register_keras_serializable()
class ExtractCLSToken(tf.keras.layers.Layer):
    """Slice out the first sequence position (the [CLS] token) from a
    (batch, seq, hidden) tensor, yielding (batch, hidden)."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def call(self, inputs):
        return inputs[:, 0, :]
|
| 33 |
+
|
| 34 |
+
class DiamondInference:
    """Loads the ViT diamond-grading model plus its preprocessing
    artifacts and predicts a colour grade for one spreadsheet row and
    its image, optionally averaging over test-time augmentations.

    Change: the long ``predict`` body is decomposed — tabular encoding
    moved to the private ``_encode_tabular`` helper; behavior unchanged.
    """

    def __init__(self, model_path, encoder_dir, model_id=None):
        """Load hyperparameters, categorical encoders, numeric scaler,
        target encoder, normalisation stats and the Keras model.

        *model_id* selects versioned artifact filenames; when omitted the
        older generic "*_imagenet_100ep.pkl" names are used instead.
        """
        self.model_id = model_id

        if model_id:
            hp_path = os.path.join(encoder_dir, f"hyperparameters_{model_id}.pkl")
            cat_path = os.path.join(encoder_dir, f"cat_encoders_{model_id}.pkl")
            num_path = os.path.join(encoder_dir, f"num_scaler_{model_id}.pkl")
            target_path = os.path.join(encoder_dir, f"target_encoder_{model_id}.pkl")
            norm_stats_path = os.path.join(encoder_dir, f"norm_stats_{model_id}.pkl")
        else:
            # Fallback to older generic names if no ID is passed
            hp_path = os.path.join(encoder_dir, "hyperparameters_imagenet_100ep.pkl")
            cat_path = os.path.join(encoder_dir, "cat_encoders_imagenet_100ep.pkl")
            num_path = os.path.join(encoder_dir, "num_scaler_imagenet_100ep.pkl")
            target_path = os.path.join(encoder_dir, "target_encoder_imagenet_100ep.pkl")
            norm_stats_path = os.path.join(encoder_dir, "norm_stats_imagenet_100ep.pkl")

        print(f"[INFO] Loading artifacts for model ID: {model_id or 'default'}")
        # hp holds training-time settings (image_size, patch_size,
        # num_channels, flat_patches_shape — keys read in process_image).
        self.hp = joblib.load(hp_path)
        # Per-column label encoders (objects exposing .classes_/.transform
        # — presumably sklearn LabelEncoders; verify against training code).
        self.cat_encoders = joblib.load(cat_path)
        self.num_scaler = joblib.load(num_path)
        self.target_encoder = joblib.load(target_path)

        if os.path.exists(norm_stats_path):
            self.norm_stats = joblib.load(norm_stats_path)
        else:
            # Default fallback to ImageNet channel statistics.
            self.norm_stats = {"mean": np.array([0.485, 0.456, 0.406]), "std": np.array([0.229, 0.224, 0.225])}

        self.model = tf.keras.models.load_model(
            model_path,
            custom_objects={"ClassToken": ClassToken, "ExtractCLSToken": ExtractCLSToken},
            compile=False,  # inference only — no optimizer/loss needed
        )
        print(f"[INFO] Model and artifacts loaded successfully from {model_path}.")

    def apply_tta_transform(self, img, transform_type):
        """Apply one named Test-Time Augmentation; unknown names return
        the image unchanged."""
        if transform_type == "original":
            return img
        elif transform_type == "horizontal_flip":
            return cv2.flip(img, 1)
        elif transform_type == "rotation_5":
            h, w = img.shape[:2]
            M = cv2.getRotationMatrix2D((w//2, h//2), 5, 1.0)
            return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REFLECT)
        elif transform_type == "rotation_minus_5":
            h, w = img.shape[:2]
            M = cv2.getRotationMatrix2D((w//2, h//2), -5, 1.0)
            return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REFLECT)
        elif transform_type == "brightness_up":
            return np.clip(img * 1.1, 0, 255).astype(np.uint8)
        return img

    def process_image(self, image_path, tta_transform=None):
        """Read, resize, normalise and patchify an image into the flat
        patch tensor the ViT expects; returns zeros of that shape when
        the image is unreadable or processing fails."""
        try:
            image = cv2.imread(image_path, cv2.IMREAD_COLOR)
            if image is None:
                return np.zeros(self.hp["flat_patches_shape"], dtype=np.float32)

            image = cv2.resize(image, (self.hp["image_size"], self.hp["image_size"]))

            if tta_transform:
                image = self.apply_tta_transform(image, tta_transform)

            # Scale to [0,1], then standardise per channel; epsilon guards
            # against a zero std in the loaded stats.
            image = image / 255.0
            image = (image - self.norm_stats["mean"]) / (self.norm_stats["std"] + 1e-7)

            patch_shape = (self.hp["patch_size"], self.hp["patch_size"], self.hp["num_channels"])
            patches = patchify(image, patch_shape, self.hp["patch_size"])
            patches = np.reshape(patches, self.hp["flat_patches_shape"]).astype(np.float32)
            return patches
        except Exception as e:
            print(f"[ERROR] Image processing failed: {e}")
            return np.zeros(self.hp["flat_patches_shape"], dtype=np.float32)

    def _encode_tabular(self, df_row):
        """Encode the row's categorical + numeric features into the
        (1, n_features) float32 array fed to the model's tabular input."""
        # Match training categorical features: StoneType, Color, Brown, BlueUv, GrdType, Result
        categorical_cols = ["StoneType", "Color", "Brown", "BlueUv", "GrdType", "Result"]
        numerical_cols = ["Carat"]

        tab_data_list = []
        for col in categorical_cols:
            val = str(df_row.get(col, "__missing__"))
            try:
                if col in self.cat_encoders:
                    # Unseen categories fall back to the "__missing__"
                    # class when the encoder knows it, else its first class.
                    if val not in self.cat_encoders[col].classes_:
                        val = "__missing__" if "__missing__" in self.cat_encoders[col].classes_ else self.cat_encoders[col].classes_[0]
                    encoded_val = self.cat_encoders[col].transform([val])[0]
                else:
                    print(f"[WARN] Encoder for column {col} not found. Using 0.")
                    encoded_val = 0
            except Exception as e:
                print(f"[ERROR] Encoding failed for {col} with value {val}: {e}. Using 0.")
                encoded_val = 0
            tab_data_list.append(encoded_val)

        for col in numerical_cols:
            try:
                val = float(df_row.get(col, 0))
                # Reshape for scaler (expects a 2-D array).
                scaled_val = self.num_scaler.transform([[val]])[0][0]
            except Exception as e:
                print(f"[ERROR] Scaling failed for {col}: {e}. Using 0.")
                scaled_val = 0
            tab_data_list.append(scaled_val)

        return np.expand_dims(np.array(tab_data_list, dtype=np.float32), axis=0)

    def predict(self, df_row, image_path, use_tta=True):
        """Predict the colour grade label for one row + image.

        With *use_tta* the class probabilities are averaged over five
        deterministic augmentations; the decoded label of the argmax
        class is returned.
        """
        tab_input = self._encode_tabular(df_row)

        if use_tta:
            tta_transforms = ["original", "horizontal_flip", "rotation_5", "rotation_minus_5", "brightness_up"]
            all_preds = []
            for transform in tta_transforms:
                img_patches = self.process_image(image_path, tta_transform=transform)
                img_input = np.expand_dims(img_patches, axis=0)
                preds = self.model.predict([img_input, tab_input], verbose=0)[0]
                all_preds.append(preds)
            final_pred_probs = np.mean(all_preds, axis=0)
        else:
            img_patches = self.process_image(image_path)
            img_input = np.expand_dims(img_patches, axis=0)
            final_pred_probs = self.model.predict([img_input, tab_input], verbose=0)[0]

        # int() keeps the index a plain Python int rather than np.int64.
        pred_idx = int(np.argmax(final_pred_probs))
        decoded_pred = self.target_encoder.inverse_transform([pred_idx])[0]

        return decoded_pred
|
requirements.txt
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
flask
|
| 2 |
+
gunicorn
|
| 3 |
+
pandas
|
| 4 |
+
openpyxl
|
| 5 |
+
numpy==1.26.4
|
| 6 |
+
opencv-python-headless
|
| 7 |
+
tensorflow-cpu==2.19.0
|
| 8 |
+
patchify
|
| 9 |
+
joblib
|
| 10 |
+
scikit-learn
|
| 11 |
+
werkzeug
|
| 12 |
+
huggingface_hub
|
| 13 |
+
tqdm
|
| 14 |
+
protobuf<5
|
| 15 |
+
ml-dtypes==0.4.0
|
| 16 |
+
python-dotenv
|
static/css/style.css
ADDED
|
@@ -0,0 +1,641 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@100;300;400;500;700&display=swap');
|
| 2 |
+
|
| 3 |
+
:root {
|
| 4 |
+
--primary-color: #ffffff;
|
| 5 |
+
--secondary-color: #a855f7;
|
| 6 |
+
--background-color: #050505;
|
| 7 |
+
--glass-bg: rgba(255, 255, 255, 0.03);
|
| 8 |
+
--glass-border: rgba(255, 255, 255, 0.08);
|
| 9 |
+
}
|
| 10 |
+
|
| 11 |
+
* {
|
| 12 |
+
box-sizing: border-box;
|
| 13 |
+
margin: 0;
|
| 14 |
+
padding: 0;
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
body {
|
| 18 |
+
font-family: 'Inter', sans-serif;
|
| 19 |
+
background: var(--background-color);
|
| 20 |
+
color: white;
|
| 21 |
+
line-height: 1.5;
|
| 22 |
+
overflow-x: hidden;
|
| 23 |
+
min-height: 100vh;
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
/* Background elements */
|
| 27 |
+
canvas#canvas3d {
|
| 28 |
+
position: fixed;
|
| 29 |
+
top: 0;
|
| 30 |
+
left: 0;
|
| 31 |
+
width: 100vw;
|
| 32 |
+
height: 100vh;
|
| 33 |
+
z-index: 0;
|
| 34 |
+
pointer-events: none;
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
.atmospheric-grain {
|
| 38 |
+
position: fixed;
|
| 39 |
+
top: 0;
|
| 40 |
+
left: 0;
|
| 41 |
+
width: 100%;
|
| 42 |
+
height: 100%;
|
| 43 |
+
z-index: 10;
|
| 44 |
+
pointer-events: none;
|
| 45 |
+
opacity: 0.03;
|
| 46 |
+
filter: contrast(150%) brightness(150%);
|
| 47 |
+
background-image: url('https://grainy-gradients.vercel.app/noise.svg');
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
.container {
|
| 51 |
+
position: relative;
|
| 52 |
+
z-index: 20;
|
| 53 |
+
width: 100%;
|
| 54 |
+
max-width: 1200px;
|
| 55 |
+
margin: 0 auto;
|
| 56 |
+
padding: 1.5rem;
|
| 57 |
+
display: flex;
|
| 58 |
+
flex-direction: column;
|
| 59 |
+
min-height: 100vh;
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
nav {
|
| 63 |
+
display: flex;
|
| 64 |
+
justify-content: space-between;
|
| 65 |
+
align-items: center;
|
| 66 |
+
padding: 1.5rem 0;
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
.nav-logo {
|
| 70 |
+
font-size: 10px;
|
| 71 |
+
letter-spacing: 0.4em;
|
| 72 |
+
font-weight: 700;
|
| 73 |
+
text-transform: uppercase;
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
.nav-meta {
|
| 77 |
+
display: flex;
|
| 78 |
+
gap: 1.5rem;
|
| 79 |
+
font-size: 8px;
|
| 80 |
+
letter-spacing: 0.2em;
|
| 81 |
+
opacity: 0.5;
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
/* Typography */
|
| 85 |
+
header {
|
| 86 |
+
margin: 3rem 0;
|
| 87 |
+
text-align: left;
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
.label-mini {
|
| 91 |
+
font-size: 9px;
|
| 92 |
+
letter-spacing: 0.3em;
|
| 93 |
+
font-weight: 700;
|
| 94 |
+
text-transform: uppercase;
|
| 95 |
+
opacity: 0.5;
|
| 96 |
+
margin-bottom: 0.5rem;
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
h1 {
|
| 100 |
+
font-size: clamp(2.5rem, 8vw, 6rem);
|
| 101 |
+
font-weight: 700;
|
| 102 |
+
letter-spacing: -0.04em;
|
| 103 |
+
line-height: 0.9;
|
| 104 |
+
text-transform: uppercase;
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
h1 .italic {
|
| 108 |
+
font-style: italic;
|
| 109 |
+
font-weight: 300;
|
| 110 |
+
opacity: 0.4;
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
/* Components */
|
| 114 |
+
.upload-card {
|
| 115 |
+
background: rgba(255, 255, 255, 0.03);
|
| 116 |
+
backdrop-filter: blur(15px);
|
| 117 |
+
border: 1px solid rgba(255, 255, 255, 0.05);
|
| 118 |
+
padding: clamp(2rem, 5vw, 4rem);
|
| 119 |
+
margin: 2rem 0;
|
| 120 |
+
width: 100%;
|
| 121 |
+
box-shadow: 0 8px 32px 0 rgba(0, 0, 0, 0.37);
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
.form-group {
|
| 125 |
+
margin-bottom: 2.5rem;
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
label {
|
| 129 |
+
display: block;
|
| 130 |
+
font-size: 10px;
|
| 131 |
+
letter-spacing: 0.15em;
|
| 132 |
+
text-transform: uppercase;
|
| 133 |
+
margin-bottom: 1rem;
|
| 134 |
+
opacity: 0.8;
|
| 135 |
+
color: var(--secondary-color);
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
/* Custom File Input Styling */
|
| 139 |
+
input[type="file"] {
|
| 140 |
+
display: block;
|
| 141 |
+
width: 100%;
|
| 142 |
+
padding: 0.8rem;
|
| 143 |
+
background: rgba(255, 255, 255, 0.02);
|
| 144 |
+
border: 1px solid rgba(255, 255, 255, 0.1);
|
| 145 |
+
color: rgba(255, 255, 255, 0.6);
|
| 146 |
+
font-size: 12px;
|
| 147 |
+
cursor: pointer;
|
| 148 |
+
transition: all 0.3s;
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
input[type="file"]::file-selector-button {
|
| 152 |
+
background: rgba(168, 85, 247, 0.15);
|
| 153 |
+
border: 1px solid var(--secondary-color);
|
| 154 |
+
color: var(--secondary-color);
|
| 155 |
+
padding: 0.5rem 1rem;
|
| 156 |
+
margin-right: 1rem;
|
| 157 |
+
text-transform: uppercase;
|
| 158 |
+
font-size: 9px;
|
| 159 |
+
font-weight: 700;
|
| 160 |
+
letter-spacing: 0.1em;
|
| 161 |
+
cursor: pointer;
|
| 162 |
+
transition: 0.3s;
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
input[type="file"]:hover::file-selector-button {
|
| 166 |
+
background: var(--secondary-color);
|
| 167 |
+
color: white;
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
.btn-launch {
|
| 171 |
+
display: inline-block;
|
| 172 |
+
padding: 1.2rem 3rem;
|
| 173 |
+
background: white;
|
| 174 |
+
color: black;
|
| 175 |
+
text-transform: uppercase;
|
| 176 |
+
font-size: 11px;
|
| 177 |
+
font-weight: 700;
|
| 178 |
+
letter-spacing: 0.2em;
|
| 179 |
+
border: none;
|
| 180 |
+
cursor: pointer;
|
| 181 |
+
transition: all 0.4s cubic-bezier(0.23, 1, 0.32, 1);
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
.btn-launch:hover {
|
| 185 |
+
background: var(--secondary-color);
|
| 186 |
+
color: white;
|
| 187 |
+
letter-spacing: 0.3em;
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
/* Metrics Dashboard */
|
| 191 |
+
.metrics-grid {
|
| 192 |
+
display: grid;
|
| 193 |
+
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
|
| 194 |
+
gap: 1.5rem;
|
| 195 |
+
margin: 2.5rem 0;
|
| 196 |
+
}
|
| 197 |
+
|
| 198 |
+
.metric-item {
|
| 199 |
+
background: rgba(255, 255, 255, 0.05);
|
| 200 |
+
backdrop-filter: blur(12px);
|
| 201 |
+
border: 1px solid rgba(255, 255, 255, 0.1);
|
| 202 |
+
padding: 2rem;
|
| 203 |
+
transition: all 0.4s cubic-bezier(0.23, 1, 0.32, 1);
|
| 204 |
+
box-shadow: 0 4px 24px -1px rgba(0, 0, 0, 0.2);
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
.metric-item:hover {
|
| 208 |
+
transform: translateY(-8px);
|
| 209 |
+
background: rgba(255, 255, 255, 0.08);
|
| 210 |
+
border-color: var(--secondary-color);
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
.metric-value {
|
| 214 |
+
font-size: clamp(2.2rem, 5vw, 3.5rem);
|
| 215 |
+
font-weight: 800;
|
| 216 |
+
letter-spacing: -0.05em;
|
| 217 |
+
display: block;
|
| 218 |
+
color: white;
|
| 219 |
+
margin-bottom: 0.5rem;
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
.metric-label {
|
| 223 |
+
font-size: 14px;
|
| 224 |
+
text-transform: uppercase;
|
| 225 |
+
letter-spacing: 0.25em;
|
| 226 |
+
opacity: 1;
|
| 227 |
+
font-weight: 800;
|
| 228 |
+
margin-bottom: 0.5rem;
|
| 229 |
+
display: block;
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
.metric-note {
|
| 233 |
+
font-size: 11px;
|
| 234 |
+
opacity: 0.4;
|
| 235 |
+
line-height: 1.4;
|
| 236 |
+
margin-top: 0.5rem;
|
| 237 |
+
text-transform: none;
|
| 238 |
+
letter-spacing: 0.02em;
|
| 239 |
+
}
|
| 240 |
+
|
| 241 |
+
.summary-metrics {
|
| 242 |
+
display: flex;
|
| 243 |
+
gap: 2rem;
|
| 244 |
+
margin-top: 2rem;
|
| 245 |
+
flex-wrap: wrap;
|
| 246 |
+
}
|
| 247 |
+
|
| 248 |
+
.summary-card {
|
| 249 |
+
background: rgba(255, 255, 255, 0.03);
|
| 250 |
+
border: 1px solid rgba(255, 255, 255, 0.1);
|
| 251 |
+
padding: 1.5rem;
|
| 252 |
+
flex: 1;
|
| 253 |
+
min-width: 150px;
|
| 254 |
+
text-align: center;
|
| 255 |
+
}
|
| 256 |
+
|
| 257 |
+
.summary-card .val {
|
| 258 |
+
font-size: 2rem;
|
| 259 |
+
font-weight: 900;
|
| 260 |
+
display: block;
|
| 261 |
+
color: var(--secondary-color);
|
| 262 |
+
}
|
| 263 |
+
|
| 264 |
+
.summary-card .lbl {
|
| 265 |
+
font-size: 10px;
|
| 266 |
+
text-transform: uppercase;
|
| 267 |
+
letter-spacing: 0.2em;
|
| 268 |
+
opacity: 0.6;
|
| 269 |
+
}
|
| 270 |
+
|
| 271 |
+
/* Table */
|
| 272 |
+
.table-wrapper {
|
| 273 |
+
width: 100%;
|
| 274 |
+
overflow-x: auto;
|
| 275 |
+
margin-top: 2rem;
|
| 276 |
+
background: rgba(255, 255, 255, 0.03);
|
| 277 |
+
backdrop-filter: blur(10px);
|
| 278 |
+
border: 1px solid rgba(255, 255, 255, 0.08);
|
| 279 |
+
border-radius: 4px;
|
| 280 |
+
}
|
| 281 |
+
|
| 282 |
+
table {
|
| 283 |
+
width: 100%;
|
| 284 |
+
border-collapse: collapse;
|
| 285 |
+
min-width: 800px;
|
| 286 |
+
}
|
| 287 |
+
|
| 288 |
+
th {
|
| 289 |
+
text-align: left;
|
| 290 |
+
padding: 1rem;
|
| 291 |
+
font-size: 9px;
|
| 292 |
+
letter-spacing: 0.1em;
|
| 293 |
+
text-transform: uppercase;
|
| 294 |
+
opacity: 0.5;
|
| 295 |
+
border-bottom: 1px solid var(--glass-border);
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
td {
|
| 299 |
+
padding: 1rem;
|
| 300 |
+
font-size: 13px;
|
| 301 |
+
border-bottom: 1px solid rgba(255, 255, 255, 0.05);
|
| 302 |
+
}
|
| 303 |
+
|
| 304 |
+
.badge-oob {
|
| 305 |
+
font-size: 8px;
|
| 306 |
+
padding: 0.15rem 0.4rem;
|
| 307 |
+
border: 1px solid var(--secondary-color);
|
| 308 |
+
color: var(--secondary-color);
|
| 309 |
+
text-transform: uppercase;
|
| 310 |
+
font-weight: 700;
|
| 311 |
+
}
|
| 312 |
+
|
| 313 |
+
/* Loader Overlay */
|
| 314 |
+
#loader-overlay {
|
| 315 |
+
position: fixed;
|
| 316 |
+
top: 0;
|
| 317 |
+
left: 0;
|
| 318 |
+
width: 100%;
|
| 319 |
+
height: 100%;
|
| 320 |
+
background: rgba(0, 0, 0, 0.98);
|
| 321 |
+
z-index: 10000;
|
| 322 |
+
display: none;
|
| 323 |
+
flex-direction: column;
|
| 324 |
+
align-items: center;
|
| 325 |
+
justify-content: center;
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
.loader-container {
|
| 329 |
+
position: relative;
|
| 330 |
+
width: 300px;
|
| 331 |
+
text-align: center;
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
.loader-line {
|
| 335 |
+
width: 100%;
|
| 336 |
+
height: 2px;
|
| 337 |
+
background: rgba(255, 255, 255, 0.05);
|
| 338 |
+
margin-bottom: 2rem;
|
| 339 |
+
position: relative;
|
| 340 |
+
overflow: hidden;
|
| 341 |
+
}
|
| 342 |
+
|
| 343 |
+
.loader-progress {
|
| 344 |
+
height: 100%;
|
| 345 |
+
width: 0%;
|
| 346 |
+
background: var(--secondary-color);
|
| 347 |
+
box-shadow: 0 0 20px var(--secondary-color);
|
| 348 |
+
transition: width 0.4s cubic-bezier(0.1, 0.7, 0.1, 1);
|
| 349 |
+
}
|
| 350 |
+
|
| 351 |
+
/* Scanning effect */
|
| 352 |
+
.loader-line::after {
|
| 353 |
+
content: '';
|
| 354 |
+
position: absolute;
|
| 355 |
+
top: 0;
|
| 356 |
+
left: -100%;
|
| 357 |
+
width: 50%;
|
| 358 |
+
height: 100%;
|
| 359 |
+
background: linear-gradient(90deg, transparent, rgba(255, 255, 255, 0.2), transparent);
|
| 360 |
+
animation: scan 1.5s infinite;
|
| 361 |
+
}
|
| 362 |
+
|
| 363 |
+
@keyframes scan {
|
| 364 |
+
from {
|
| 365 |
+
left: -100%;
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
to {
|
| 369 |
+
left: 200%;
|
| 370 |
+
}
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
.loader-text {
|
| 374 |
+
font-size: 11px;
|
| 375 |
+
letter-spacing: 0.4em;
|
| 376 |
+
text-transform: uppercase;
|
| 377 |
+
color: white;
|
| 378 |
+
animation: pulse 2s infinite;
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
@keyframes pulse {
|
| 382 |
+
|
| 383 |
+
0%,
|
| 384 |
+
100% {
|
| 385 |
+
opacity: 0.4;
|
| 386 |
+
transform: scale(0.98);
|
| 387 |
+
}
|
| 388 |
+
|
| 389 |
+
50% {
|
| 390 |
+
opacity: 1;
|
| 391 |
+
transform: scale(1);
|
| 392 |
+
}
|
| 393 |
+
}
|
| 394 |
+
|
| 395 |
+
/* Confusion Matrix */
|
| 396 |
+
.cm-container {
|
| 397 |
+
margin-top: 3rem;
|
| 398 |
+
overflow-x: auto;
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
.cm-table {
|
| 402 |
+
border-collapse: collapse;
|
| 403 |
+
font-size: 11px;
|
| 404 |
+
}
|
| 405 |
+
|
| 406 |
+
.cm-table th,
|
| 407 |
+
.cm-table td {
|
| 408 |
+
padding: 0.8rem;
|
| 409 |
+
border: 1px solid rgba(255, 255, 255, 0.05);
|
| 410 |
+
text-align: center;
|
| 411 |
+
min-width: 50px;
|
| 412 |
+
}
|
| 413 |
+
|
| 414 |
+
.cm-label-row th {
|
| 415 |
+
background: rgba(255, 255, 255, 0.03);
|
| 416 |
+
color: var(--secondary-color);
|
| 417 |
+
}
|
| 418 |
+
|
| 419 |
+
.cm-value {
|
| 420 |
+
font-weight: 700;
|
| 421 |
+
transition: background 0.3s;
|
| 422 |
+
}
|
| 423 |
+
|
| 424 |
+
.cm-match {
|
| 425 |
+
background: rgba(168, 85, 247, 0.2);
|
| 426 |
+
color: white;
|
| 427 |
+
}
|
| 428 |
+
|
| 429 |
+
.cm-dimmed {
|
| 430 |
+
opacity: 0.2;
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
.significance-tag {
|
| 434 |
+
font-size: 9px;
|
| 435 |
+
padding: 2px 6px;
|
| 436 |
+
background: rgba(255, 255, 255, 0.1);
|
| 437 |
+
border-radius: 3px;
|
| 438 |
+
margin-left: 8px;
|
| 439 |
+
vertical-align: middle;
|
| 440 |
+
letter-spacing: 0;
|
| 441 |
+
opacity: 0.8;
|
| 442 |
+
}
|
| 443 |
+
|
| 444 |
+
.tooltip-v {
|
| 445 |
+
position: relative;
|
| 446 |
+
cursor: help;
|
| 447 |
+
}
|
| 448 |
+
|
| 449 |
+
.tooltip-v::after {
|
| 450 |
+
content: attr(data-tip);
|
| 451 |
+
position: absolute;
|
| 452 |
+
bottom: 120%;
|
| 453 |
+
left: 50%;
|
| 454 |
+
transform: translateX(-50%);
|
| 455 |
+
padding: 8px 12px;
|
| 456 |
+
background: #111;
|
| 457 |
+
border: 1px solid var(--secondary-color);
|
| 458 |
+
color: white;
|
| 459 |
+
font-size: 10px;
|
| 460 |
+
white-space: nowrap;
|
| 461 |
+
opacity: 0;
|
| 462 |
+
pointer-events: none;
|
| 463 |
+
transition: 0.3s;
|
| 464 |
+
z-index: 100;
|
| 465 |
+
letter-spacing: 0.05em;
|
| 466 |
+
text-transform: none;
|
| 467 |
+
}
|
| 468 |
+
|
| 469 |
+
.tooltip-v:hover::after {
|
| 470 |
+
opacity: 1;
|
| 471 |
+
bottom: 140%;
|
| 472 |
+
}
|
| 473 |
+
|
| 474 |
+
/* Responsiveness fixes */
|
| 475 |
+
@media (max-width: 768px) {
|
| 476 |
+
.container {
|
| 477 |
+
padding: 1rem;
|
| 478 |
+
}
|
| 479 |
+
|
| 480 |
+
header {
|
| 481 |
+
margin: 2rem 0;
|
| 482 |
+
}
|
| 483 |
+
|
| 484 |
+
.upload-card {
|
| 485 |
+
padding: 1.5rem;
|
| 486 |
+
}
|
| 487 |
+
}
|
| 488 |
+
|
| 489 |
+
.animate-up {
|
| 490 |
+
animation: slideUp 0.8s cubic-bezier(0.23, 1, 0.32, 1) forwards;
|
| 491 |
+
}
|
| 492 |
+
|
| 493 |
+
@keyframes slideUp {
|
| 494 |
+
from {
|
| 495 |
+
transform: translateY(20px);
|
| 496 |
+
opacity: 0;
|
| 497 |
+
}
|
| 498 |
+
|
| 499 |
+
to {
|
| 500 |
+
transform: translateY(0);
|
| 501 |
+
opacity: 1;
|
| 502 |
+
}
|
| 503 |
+
}
|
| 504 |
+
|
| 505 |
+
.img-thumb {
|
| 506 |
+
width: 44px;
|
| 507 |
+
height: 44px;
|
| 508 |
+
object-fit: cover;
|
| 509 |
+
border: 1px solid rgba(255, 255, 255, 0.1);
|
| 510 |
+
border-radius: 4px;
|
| 511 |
+
cursor: pointer;
|
| 512 |
+
transition: all 0.3s cubic-bezier(0.23, 1, 0.32, 1);
|
| 513 |
+
}
|
| 514 |
+
|
| 515 |
+
.img-thumb:hover {
|
| 516 |
+
transform: scale(1.1) rotate(2deg);
|
| 517 |
+
border-color: var(--secondary-color);
|
| 518 |
+
box-shadow: 0 0 15px rgba(168, 85, 247, 0.3);
|
| 519 |
+
}
|
| 520 |
+
|
| 521 |
+
#image-modal {
|
| 522 |
+
display: none;
|
| 523 |
+
position: fixed;
|
| 524 |
+
top: 0;
|
| 525 |
+
left: 0;
|
| 526 |
+
width: 100vw;
|
| 527 |
+
height: 100vh;
|
| 528 |
+
background: rgba(0, 0, 0, 0.95);
|
| 529 |
+
z-index: 9999;
|
| 530 |
+
justify-content: center;
|
| 531 |
+
align-items: center;
|
| 532 |
+
backdrop-filter: blur(15px);
|
| 533 |
+
}
|
| 534 |
+
|
| 535 |
+
#image-modal.active {
|
| 536 |
+
display: flex;
|
| 537 |
+
animation: fadeIn 0.3s ease;
|
| 538 |
+
}
|
| 539 |
+
|
| 540 |
+
#modal-img {
|
| 541 |
+
max-width: 90%;
|
| 542 |
+
max-height: 85%;
|
| 543 |
+
border: 1px solid rgba(255, 255, 255, 0.1);
|
| 544 |
+
box-shadow: 0 0 40px rgba(0, 0, 0, 0.5);
|
| 545 |
+
}
|
| 546 |
+
|
| 547 |
+
.modal-close {
|
| 548 |
+
position: absolute;
|
| 549 |
+
top: 2rem;
|
| 550 |
+
right: 2rem;
|
| 551 |
+
color: white;
|
| 552 |
+
font-size: 24px;
|
| 553 |
+
cursor: pointer;
|
| 554 |
+
opacity: 0.5;
|
| 555 |
+
transition: opacity 0.3s;
|
| 556 |
+
}
|
| 557 |
+
|
| 558 |
+
.modal-close:hover {
|
| 559 |
+
opacity: 1;
|
| 560 |
+
}
|
| 561 |
+
|
| 562 |
+
@keyframes fadeIn {
|
| 563 |
+
from {
|
| 564 |
+
opacity: 0;
|
| 565 |
+
}
|
| 566 |
+
|
| 567 |
+
to {
|
| 568 |
+
opacity: 1;
|
| 569 |
+
}
|
| 570 |
+
}
|
| 571 |
+
|
| 572 |
+
/* Flush Section */
|
| 573 |
+
.flush-section {
|
| 574 |
+
margin-top: 3rem;
|
| 575 |
+
padding-top: 2rem;
|
| 576 |
+
border-top: 1px solid rgba(255, 255, 255, 0.05);
|
| 577 |
+
}
|
| 578 |
+
|
| 579 |
+
.btn-flush-data {
|
| 580 |
+
padding: 0.8rem 2rem;
|
| 581 |
+
background: transparent;
|
| 582 |
+
color: #ef4444;
|
| 583 |
+
/* Red color for danger */
|
| 584 |
+
border: 1px solid #ef4444;
|
| 585 |
+
text-transform: uppercase;
|
| 586 |
+
font-size: 10px;
|
| 587 |
+
font-weight: 700;
|
| 588 |
+
letter-spacing: 0.1em;
|
| 589 |
+
cursor: pointer;
|
| 590 |
+
transition: all 0.3s cubic-bezier(0.23, 1, 0.32, 1);
|
| 591 |
+
}
|
| 592 |
+
|
| 593 |
+
.btn-flush-data:hover {
|
| 594 |
+
background: #ef4444;
|
| 595 |
+
color: white;
|
| 596 |
+
}
|
| 597 |
+
|
| 598 |
+
.flush-hint {
|
| 599 |
+
font-size: 9px;
|
| 600 |
+
opacity: 0.4;
|
| 601 |
+
margin-top: 0.8rem;
|
| 602 |
+
letter-spacing: 0.05em;
|
| 603 |
+
}
|
| 604 |
+
|
| 605 |
+
/* Flash Messages */
|
| 606 |
+
.flash-container {
|
| 607 |
+
position: fixed;
|
| 608 |
+
top: 2rem;
|
| 609 |
+
left: 50%;
|
| 610 |
+
transform: translateX(-50%);
|
| 611 |
+
z-index: 11000;
|
| 612 |
+
width: auto;
|
| 613 |
+
max-width: 90%;
|
| 614 |
+
}
|
| 615 |
+
|
| 616 |
+
.flash-message {
|
| 617 |
+
background: var(--secondary-color);
|
| 618 |
+
color: white;
|
| 619 |
+
padding: 1rem 2rem;
|
| 620 |
+
border-radius: 4px;
|
| 621 |
+
font-size: 12px;
|
| 622 |
+
font-weight: 500;
|
| 623 |
+
text-transform: uppercase;
|
| 624 |
+
letter-spacing: 0.1em;
|
| 625 |
+
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.3);
|
| 626 |
+
margin-bottom: 0.5rem;
|
| 627 |
+
transition: opacity 0.5s ease;
|
| 628 |
+
animation: slideDownFlash 0.5s cubic-bezier(0.23, 1, 0.32, 1);
|
| 629 |
+
}
|
| 630 |
+
|
| 631 |
+
@keyframes slideDownFlash {
|
| 632 |
+
from {
|
| 633 |
+
transform: translateY(-20px);
|
| 634 |
+
opacity: 0;
|
| 635 |
+
}
|
| 636 |
+
|
| 637 |
+
to {
|
| 638 |
+
transform: translateY(0);
|
| 639 |
+
opacity: 1;
|
| 640 |
+
}
|
| 641 |
+
}
|
templates/index.html
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
|
| 4 |
+
<head>
|
| 5 |
+
<meta charset="UTF-8">
|
| 6 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 7 |
+
<title>Diamond AI / Pipeline</title>
|
| 8 |
+
<link rel="stylesheet" href="{{ url_for('static', filename='css/style.css') }}">
|
| 9 |
+
</head>
|
| 10 |
+
|
| 11 |
+
<body>
|
| 12 |
+
<canvas id="canvas3d"></canvas>
|
| 13 |
+
<div class="atmospheric-grain"></div>
|
| 14 |
+
|
| 15 |
+
<div id="loader-overlay">
|
| 16 |
+
<div class="loader-container">
|
| 17 |
+
<div class="loader-line">
|
| 18 |
+
<div class="loader-progress" id="js-loader-bar"></div>
|
| 19 |
+
</div>
|
| 20 |
+
<div class="loader-text">PROCS / ANALYZING DATA [0%]</div>
|
| 21 |
+
</div>
|
| 22 |
+
</div>
|
| 23 |
+
|
| 24 |
+
{% with messages = get_flashed_messages() %}
|
| 25 |
+
{% if messages %}
|
| 26 |
+
<div class="flash-container">
|
| 27 |
+
{% for message in messages %}
|
| 28 |
+
<div class="flash-message">{{ message }}</div>
|
| 29 |
+
{% endfor %}
|
| 30 |
+
</div>
|
| 31 |
+
{% endif %}
|
| 32 |
+
{% endwith %}
|
| 33 |
+
|
| 34 |
+
<div class="container animate-up">
|
| 35 |
+
<nav>
|
| 36 |
+
<div class="nav-logo">DIAMOND / AI</div>
|
| 37 |
+
<div class="nav-meta">
|
| 38 |
+
<span>SYSTEM V1.0</span>
|
| 39 |
+
<span>2026</span>
|
| 40 |
+
</div>
|
| 41 |
+
</nav>
|
| 42 |
+
|
| 43 |
+
<header>
|
| 44 |
+
<p class="label-mini">Automated Grading Pipeline</p>
|
| 45 |
+
<h1>DIAMOND<br /><span class="italic">INTELLIGENCE</span></h1>
|
| 46 |
+
</header>
|
| 47 |
+
|
| 48 |
+
<main>
|
| 49 |
+
<div class="upload-card">
|
| 50 |
+
<form id="upload-form" action="/upload" method="post" enctype="multipart/form-data">
|
| 51 |
+
<div class="form-group">
|
| 52 |
+
<label>01 / SELECT IMAGE BUNDLE (ZIP)</label>
|
| 53 |
+
<input type="file" name="zip_file" accept=".zip" required>
|
| 54 |
+
</div>
|
| 55 |
+
|
| 56 |
+
<div class="form-group">
|
| 57 |
+
<label>02 / SELECT DATA MANIFEST (EXCEL)</label>
|
| 58 |
+
<input type="file" name="excel_file" accept=".xlsx, .xls" required>
|
| 59 |
+
</div>
|
| 60 |
+
|
| 61 |
+
<button type="submit" class="btn-launch">Execute Phase</button>
|
| 62 |
+
</form>
|
| 63 |
+
|
| 64 |
+
<div class="flush-section">
|
| 65 |
+
<form id="flush-form" action="/flush" method="post" onsubmit="return confirmFlush()">
|
| 66 |
+
<button type="submit" class="btn-flush-data">Flush System Data</button>
|
| 67 |
+
</form>
|
| 68 |
+
<p class="flush-hint">Resets all uploads and results to free up space.</p>
|
| 69 |
+
</div>
|
| 70 |
+
</div>
|
| 71 |
+
</main>
|
| 72 |
+
</div>
|
| 73 |
+
|
| 74 |
+
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>
|
| 75 |
+
<script>
|
| 76 |
+
// Simplified shader logic to be more lightweight and robust
// Fullscreen-quad vertex shader: passes UVs straight through to the fragment stage.
const vertexShader = `varying vec2 vUv; void main() { vUv = uv; gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0); }`;
// Fragment shader: animated 2D simplex-noise gradient used as the page
// background. `uTime` is advanced every frame by the render loop below;
// the final color mixes a near-black blue with a dark purple by noise value.
const fragmentShader = `
uniform float uTime;
varying vec2 vUv;
// Simplex 2D noise
vec3 permute(vec3 x) { return mod(((x*34.0)+1.0)*x, 289.0); }
float snoise(vec2 v){
const vec4 C = vec4(0.211324865405187, 0.366025403784439, -0.577350269189626, 0.024390243902439);
vec2 i = floor(v + dot(v, C.yy) );
vec2 x0 = v - i + dot(i, C.xx);
vec2 i1; i1 = (x0.x > x0.y) ? vec2(1.0, 0.0) : vec2(0.0, 1.0);
vec4 x12 = x0.xyxy + C.xxzz;
x12.xy -= i1;
i = mod(i, 289.0);
vec3 p = permute( permute( i.y + vec3(0.0, i1.y, 1.0 )) + i.x + vec3(0.0, i1.x, 1.0 ));
vec3 m = max(0.5 - vec3(dot(x0,x0), dot(x12.xy,x12.xy), dot(x12.zw,x12.zw)), 0.0);
m = m*m ; m = m*m ;
vec3 x = 2.0 * fract(p * C.www) - 1.0;
vec3 h = abs(x) - 0.5;
vec3 ox = floor(x + 0.5);
vec3 a0 = x - ox;
m *= 1.79284291400159 - 0.85373472095314 * ( a0*a0 + h*h );
vec3 g;
g.x = a0.x * x0.x + h.x * x0.y;
g.yz = a0.yz * x12.xz + h.yz * x12.yw;
return 130.0 * dot(m, g);
}
void main() {
vec2 uv = vUv;
float n = snoise(uv * 1.5 + uTime * 0.05);
vec3 color = mix(vec3(0.02, 0.02, 0.05), vec3(0.08, 0.04, 0.15), n * 0.5 + 0.5);
gl_FragColor = vec4(color, 1.0);
}
`;
| 111 |
+
|
| 112 |
+
// Background renderer: one fullscreen quad driven by the noise shader above.
const uniforms = { uTime: { value: 0 } };
const scene = new THREE.Scene();
// An orthographic camera spanning -1..1 maps the quad exactly to the viewport.
const camera = new THREE.OrthographicCamera(-1, 1, 1, -1, 0, 1);
const renderer = new THREE.WebGLRenderer({
    canvas: document.getElementById('canvas3d'),
    antialias: true
});
renderer.setSize(window.innerWidth, window.innerHeight);

// A 2x2 plane fills the camera's clip space, so no projection math is needed.
const backgroundQuad = new THREE.Mesh(
    new THREE.PlaneGeometry(2, 2),
    new THREE.ShaderMaterial({ vertexShader, fragmentShader, uniforms })
);
scene.add(backgroundQuad);
|
| 121 |
+
|
| 122 |
+
// Render loop: convert the rAF timestamp (ms) to seconds for the shader
// clock, schedule the next frame, and redraw the background quad.
const animate = function (timestampMs) {
    uniforms.uTime.value = timestampMs * 0.001;
    requestAnimationFrame(animate);
    renderer.render(scene, camera);
};
requestAnimationFrame(animate);
|
| 128 |
+
|
| 129 |
+
// While the form posts, reveal the fullscreen loader and animate a
// simulated progress bar that eases toward (but never passes) 99%.
document.getElementById('upload-form').addEventListener('submit', () => {
    document.getElementById('loader-overlay').style.display = 'flex';

    const bar = document.getElementById('js-loader-bar');
    const text = document.querySelector('.loader-text');
    let progress = 0;

    setInterval(() => {
        // Ease-out: each step shrinks as progress approaches 99.
        if (progress < 99) {
            progress += (99 - progress) * 0.03 + 0.1;
        }
        const shown = Math.floor(progress);
        bar.style.width = shown + '%';
        text.innerText = `PROCS / ANALYZING DATA [${shown}%]`;
    }, 60); // Faster update interval for "aliveness"
});
|
| 145 |
+
|
| 146 |
+
// Guard the destructive "flush" form submission behind a native confirm
// dialog; returning false cancels the submit.
function confirmFlush() {
    const warning = "Are you sure you want to flush all data? This will permanently delete all uploaded images and generated reports.";
    return confirm(warning);
}
|
| 149 |
+
|
| 150 |
+
// Keep the background canvas sized to the viewport on window resize.
window.onresize = function () {
    renderer.setSize(window.innerWidth, window.innerHeight);
};
|
| 151 |
+
|
| 152 |
+
// Auto-hide flash messages: fade them out after 3s, then remove them from
// the DOM once the 0.5s opacity transition (see .flash-message CSS) has
// finished. Without the removal the fixed, top-centered .flash-container
// would stay on top of the page at opacity 0 and keep intercepting clicks.
setTimeout(() => {
    const flashes = document.querySelectorAll('.flash-message');
    flashes.forEach(f => {
        f.style.opacity = '0';
        // 600ms > the 0.5s CSS transition, so removal happens after the fade.
        setTimeout(() => f.remove(), 600);
    });
}, 3000);
|
| 157 |
+
</script>
|
| 158 |
+
</body>
|
| 159 |
+
|
| 160 |
+
</html>
|
templates/report.html
ADDED
|
@@ -0,0 +1,287 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
|
| 4 |
+
<head>
|
| 5 |
+
<meta charset="UTF-8">
|
| 6 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 7 |
+
<title>Diamond AI / Results</title>
|
| 8 |
+
<link rel="stylesheet" href="{{ url_for('static', filename='css/style.css') }}">
|
| 9 |
+
</head>
|
| 10 |
+
|
| 11 |
+
<body>
|
| 12 |
+
<canvas id="canvas3d" style="opacity: 0.8"></canvas>
|
| 13 |
+
<div class="atmospheric-grain"></div>
|
| 14 |
+
|
| 15 |
+
<div class="container animate-up">
|
| 16 |
+
<nav>
|
| 17 |
+
<div class="nav-logo">RESULTS / DASHBOARD</div>
|
| 18 |
+
<div class="nav-meta">
|
| 19 |
+
<span>STATUS: COMPLETE</span>
|
| 20 |
+
</div>
|
| 21 |
+
</nav>
|
| 22 |
+
|
| 23 |
+
<header>
|
| 24 |
+
<p class="label-mini">Batch Processing Overview</p>
|
| 25 |
+
<h1>ANALYTICS<br /><span class="italic">REPORT</span></h1>
|
| 26 |
+
<div style="margin-top: 1.5rem;">
|
| 27 |
+
<a href="/download/{{ report_file }}" class="btn-launch" style="text-decoration: none;">Export Result
|
| 28 |
+
Dataset</a>
|
| 29 |
+
</div>
|
| 30 |
+
</header>
|
| 31 |
+
|
| 32 |
+
{% if metrics %}
|
| 33 |
+
<section class="upload-card" style="padding: 2.5rem; border-radius: 8px;">
|
| 34 |
+
<p class="label-mini">Performance Summary</p>
|
| 35 |
+
<div class="metrics-grid">
|
| 36 |
+
<div class="metric-item">
|
| 37 |
+
<span class="metric-value" style="font-size: 4rem; font-weight: 900;">{{ (metrics.accuracy *
|
| 38 |
+
100)|round(2) }}%</span>
|
| 39 |
+
<span class="metric-label" style="font-size: 14px; font-weight: 800; opacity: 1;">ACCURACY</span>
|
| 40 |
+
<p class="metric-note">Percentage of correct predictions out of total samples.</p>
|
| 41 |
+
</div>
|
| 42 |
+
<div class="metric-item">
|
| 43 |
+
<span class="metric-value" style="font-size: 4rem; font-weight: 900;">{{ metrics.f1|round(4)
|
| 44 |
+
}}</span>
|
| 45 |
+
<span class="metric-label" style="font-size: 14px; font-weight: 800; opacity: 1;">WEIGHTED F1</span>
|
| 46 |
+
<p class="metric-note">Harmonic mean of precision and recall, weighted by class frequency.</p>
|
| 47 |
+
</div>
|
| 48 |
+
<div class="metric-item">
|
| 49 |
+
<span class="metric-value" style="font-size: 4rem; font-weight: 900;">{{ metrics.macro_f1|round(4)
|
| 50 |
+
}}</span>
|
| 51 |
+
<span class="metric-label" style="font-size: 14px; font-weight: 800; opacity: 1;">MACRO F1</span>
|
| 52 |
+
<p class="metric-note">Unweighted mean of F1 scores across all classes (highlights imbalance
|
| 53 |
+
issues).</p>
|
| 54 |
+
</div>
|
| 55 |
+
<div class="metric-item">
|
| 56 |
+
<span class="metric-value" style="font-size: 4rem; font-weight: 900;">{{ metrics.precision|round(4)
|
| 57 |
+
}}</span>
|
| 58 |
+
<span class="metric-label" style="font-size: 14px; font-weight: 800; opacity: 1;">PRECISION</span>
|
| 59 |
+
<p class="metric-note">Ability of the classifier not to label a negative sample as positive.</p>
|
| 60 |
+
</div>
|
| 61 |
+
<div class="metric-item">
|
| 62 |
+
<span class="metric-value" style="font-size: 4rem; font-weight: 900;">{{ metrics.recall|round(4)
|
| 63 |
+
}}</span>
|
| 64 |
+
<span class="metric-label" style="font-size: 14px; font-weight: 800; opacity: 1;">RECALL</span>
|
| 65 |
+
<p class="metric-note">Ability of the classifier to find all positive samples.</p>
|
| 66 |
+
</div>
|
| 67 |
+
</div>
|
| 68 |
+
|
| 69 |
+
<!-- Confusion Matrix -->
|
| 70 |
+
<p class="label-mini" style="margin-top: 4rem;">Confusion Matrix (Prediction Stability)</p>
|
| 71 |
+
<div class="cm-container">
|
| 72 |
+
<table class="cm-table">
|
| 73 |
+
<thead>
|
| 74 |
+
<tr class="cm-label-row">
|
| 75 |
+
<th>Actual \ Pred</th>
|
| 76 |
+
{% for label in metrics.confusion_matrix.labels %}
|
| 77 |
+
<th>{{ label }}</th>
|
| 78 |
+
{% endfor %}
|
| 79 |
+
</tr>
|
| 80 |
+
</thead>
|
| 81 |
+
<tbody>
|
| 82 |
+
{% for row_idx in range(metrics.confusion_matrix.matrix|length) %}
|
| 83 |
+
<tr>
|
| 84 |
+
<td
|
| 85 |
+
style="background: rgba(255,255,255,0.03); font-weight: 700; color: var(--secondary-color);">
|
| 86 |
+
{{ metrics.confusion_matrix.labels[row_idx] }}
|
| 87 |
+
</td>
|
| 88 |
+
{% for col_idx in range(metrics.confusion_matrix.matrix[row_idx]|length) %}
|
| 89 |
+
{% set val = metrics.confusion_matrix.matrix[row_idx][col_idx] %}
|
| 90 |
+
<td
|
| 91 |
+
class="cm-value {% if row_idx == col_idx %}cm-match{% endif %} {% if val == 0 %}cm-dimmed{% endif %}">
|
| 92 |
+
{{ val }}
|
| 93 |
+
</td>
|
| 94 |
+
{% endfor %}
|
| 95 |
+
</tr>
|
| 96 |
+
{% endfor %}
|
| 97 |
+
</tbody>
|
| 98 |
+
</table>
|
| 99 |
+
</div>
|
| 100 |
+
|
| 101 |
+
<div class="summary-metrics" style="margin-top: 2.5rem; display: flex; gap: 1.5rem; flex-wrap: wrap;">
|
| 102 |
+
<div class="summary-card"
|
| 103 |
+
style="background: rgba(168, 85, 247, 0.05); border: 1px solid var(--secondary-color); padding: 1.5rem; flex: 1; min-width: 200px; text-align: center; border-radius: 4px;">
|
| 104 |
+
<span class="lbl"
|
| 105 |
+
style="font-size: 11px; font-weight: 800; opacity: 0.8; text-transform: uppercase; letter-spacing: 0.1em; display: block; margin-bottom: 0.5rem;">Macro
|
| 106 |
+
Avg F1</span>
|
| 107 |
+
<span class="val" style="font-size: 3rem; font-weight: 900; color: white;">{{
|
| 108 |
+
metrics.macro_f1|round(4) }}</span>
|
| 109 |
+
</div>
|
| 110 |
+
<div class="summary-card"
|
| 111 |
+
style="background: rgba(255, 255, 255, 0.03); border: 1px solid rgba(255,255,255,0.1); padding: 1.5rem; flex: 1; min-width: 200px; text-align: center; border-radius: 4px;">
|
| 112 |
+
<span class="lbl"
|
| 113 |
+
style="font-size: 11px; font-weight: 800; opacity: 0.8; text-transform: uppercase; letter-spacing: 0.1em; display: block; margin-bottom: 0.5rem;">Weighted
|
| 114 |
+
Avg F1</span>
|
| 115 |
+
<span class="val" style="font-size: 3rem; font-weight: 900; color: white;">{{ metrics.f1|round(4)
|
| 116 |
+
}}</span>
|
| 117 |
+
</div>
|
| 118 |
+
<div class="summary-card"
|
| 119 |
+
style="background: rgba(255, 255, 255, 0.03); border: 1px solid rgba(255,255,255,0.1); padding: 1.5rem; flex: 1; min-width: 200px; text-align: center; border-radius: 4px;">
|
| 120 |
+
<span class="lbl"
|
| 121 |
+
style="font-size: 11px; font-weight: 800; opacity: 0.8; text-transform: uppercase; letter-spacing: 0.1em; display: block; margin-bottom: 0.5rem;">Overall
|
| 122 |
+
Accuracy</span>
|
| 123 |
+
<span class="val" style="font-size: 3rem; font-weight: 900; color: white;">{{ (metrics.accuracy *
|
| 124 |
+
100)|round(2) }}%</span>
|
| 125 |
+
</div>
|
| 126 |
+
</div>
|
| 127 |
+
|
| 128 |
+
<p class="label-mini" style="margin-top: 4rem;">Class-Wise Decomposition</p>
|
| 129 |
+
<div class="table-wrapper">
|
| 130 |
+
<table>
|
| 131 |
+
<thead>
|
| 132 |
+
<tr>
|
| 133 |
+
<th>Grade Class</th>
|
| 134 |
+
<th>Precision</th>
|
| 135 |
+
<th>Recall</th>
|
| 136 |
+
<th>F1-Score</th>
|
| 137 |
+
<th>Support</th>
|
| 138 |
+
</tr>
|
| 139 |
+
</thead>
|
| 140 |
+
<tbody>
|
| 141 |
+
{% for item in metrics.class_metrics %}
|
| 142 |
+
<tr>
|
| 143 |
+
<td style="font-weight: 700; color: var(--secondary-color);">{{ item.label }}</td>
|
| 144 |
+
<td>{{ item.precision }}</td>
|
| 145 |
+
<td>{{ item.recall }}</td>
|
| 146 |
+
<td>{{ item.f1 }}</td>
|
| 147 |
+
<td style="opacity: 0.5;">{{ item.support }}</td>
|
| 148 |
+
</tr>
|
| 149 |
+
{% endfor %}
|
| 150 |
+
</tbody>
|
| 151 |
+
</table>
|
| 152 |
+
</div>
|
| 153 |
+
</section>
|
| 154 |
+
{% endif %}
|
| 155 |
+
|
| 156 |
+
<section class="upload-card" style="margin-top: 4rem; padding: 2.5rem; border-radius: 8px;">
|
| 157 |
+
<p class="label-mini">Inference Manifest</p>
|
| 158 |
+
<div class="table-wrapper">
|
| 159 |
+
<table>
|
| 160 |
+
<thead>
|
| 161 |
+
<tr>
|
| 162 |
+
<th>Stone Identifier</th>
|
| 163 |
+
{% for feature in model_features %}
|
| 164 |
+
<th>{{ feature }}</th>
|
| 165 |
+
{% endfor %}
|
| 166 |
+
<th>AI Result</th>
|
| 167 |
+
{% for col in out_of_box_cols %}
|
| 168 |
+
<th>{{ col }}</th>
|
| 169 |
+
{% endfor %}
|
| 170 |
+
<th>Preview</th>
|
| 171 |
+
</tr>
|
| 172 |
+
</thead>
|
| 173 |
+
<tbody>
|
| 174 |
+
{% for row in report_data %}
|
| 175 |
+
<tr>
|
| 176 |
+
<td style="font-size: 14px; font-weight: 900; color: white;">{{ row.L_Code }}</td>
|
| 177 |
+
{% for feature in model_features %}
|
| 178 |
+
<td style="opacity: 0.7;">{{ row[feature] }}</td>
|
| 179 |
+
{% endfor %}
|
| 180 |
+
<td style="color: var(--secondary-color); font-size: 14px; font-weight: 900;">{{
|
| 181 |
+
row.Predicted_FGrdCol }}</td>
|
| 182 |
+
{% for col in out_of_box_cols %}
|
| 183 |
+
<td>
|
| 184 |
+
{% if row[col] and row[col]|string != 'nan' %}
|
| 185 |
+
<span class="badge-oob">{{ row[col] }}</span>
|
| 186 |
+
{% else %}
|
| 187 |
+
<span style="opacity: 0.2;">-</span>
|
| 188 |
+
{% endif %}
|
| 189 |
+
</td>
|
| 190 |
+
{% endfor %}
|
| 191 |
+
<td>
|
| 192 |
+
{% if row.Image_Path != 'N/A' %}
|
| 193 |
+
<img src="/image/{{ row.Image_Path }}" class="img-thumb" onclick="openModal(this.src)"
|
| 194 |
+
alt="Diamond Preview">
|
| 195 |
+
{% else %}
|
| 196 |
+
<span style="font-size: 10px; opacity: 0.3;">NO IMAGE</span>
|
| 197 |
+
{% endif %}
|
| 198 |
+
</td>
|
| 199 |
+
</tr>
|
| 200 |
+
{% endfor %}
|
| 201 |
+
</tbody>
|
| 202 |
+
</table>
|
| 203 |
+
</div>
|
| 204 |
+
</section>
|
| 205 |
+
|
| 206 |
+
<footer style="margin-top: 4rem; padding-bottom: 2rem;">
|
| 207 |
+
<a href="/"
|
| 208 |
+
style="font-size: 10px; letter-spacing: 0.1em; text-transform: uppercase; color: white; opacity: 0.5; text-decoration: none;">←
|
| 209 |
+
New Batch</a>
|
| 210 |
+
</footer>
|
| 211 |
+
</div>
|
| 212 |
+
|
| 213 |
+
<!-- Image Viewer Modal -->
|
| 214 |
+
<div id="image-modal">
|
| 215 |
+
<span class="modal-close" onclick="closeModal()">×</span>
|
| 216 |
+
<img id="modal-img" src="" alt="Enlarged View">
|
| 217 |
+
</div>
|
| 218 |
+
|
| 219 |
+
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>
|
| 220 |
+
<script>
|
| 221 |
+
// Open the fullscreen image viewer and load the clicked thumbnail into it.
// `src` is the thumbnail's resolved URL (passed as `this.src` from the <img>).
function openModal(src) {
    const viewer = document.getElementById('image-modal');
    const enlarged = document.getElementById('modal-img');
    enlarged.src = src;
    viewer.classList.add('active');
}
|
| 227 |
+
|
| 228 |
+
// Hide the fullscreen image viewer (visibility is driven by the `active` class).
function closeModal() {
    document.getElementById('image-modal').classList.remove('active');
}
|
| 232 |
+
|
| 233 |
+
// Close modal on background click: dismiss only when the dark backdrop itself
// is the click target, so clicks on the enlarged image keep the viewer open.
// (Kept as a `function` expression — `this` must be the modal element.)
document.getElementById('image-modal').onclick = function (e) {
    if (e.target !== this) return;
    closeModal();
};
|
| 237 |
+
|
| 238 |
+
// Shader from index.html repeated for consistency
// Pass-through vertex shader: forwards the plane's UVs and projects position.
const vertexShader = `varying vec2 vUv; void main() { vUv = uv; gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0); }`;
// Fragment shader: a 2D simplex-noise function (`snoise`) sampled at
// uv * 1.5, drifting over time via the `uTime` uniform, mixes black with a
// faint dark purple (0.05, 0.02, 0.1) to produce the animated background.
// NOTE(review): the GLSL source is kept byte-identical — comments cannot be
// added inside the template literal without changing the compiled shader.
const fragmentShader = `
uniform float uTime;
varying vec2 vUv;
vec3 permute(vec3 x) { return mod(((x*34.0)+1.0)*x, 289.0); }
float snoise(vec2 v){
const vec4 C = vec4(0.211324865405187, 0.366025403784439, -0.577350269189626, 0.024390243902439);
vec2 i = floor(v + dot(v, C.yy) );
vec2 x0 = v - i + dot(i, C.xx);
vec2 i1; i1 = (x0.x > x0.y) ? vec2(1.0, 0.0) : vec2(0.0, 1.0);
vec4 x12 = x0.xyxy + C.xxzz; x12.xy -= i1;
i = mod(i, 289.0);
vec3 p = permute( permute( i.y + vec3(0.0, i1.y, 1.0 )) + i.x + vec3(0.0, i1.x, 1.0 ));
vec3 m = max(0.5 - vec3(dot(x0,x0), dot(x12.xy,x12.xy), dot(x12.zw,x12.zw)), 0.0);
m = m*m ; m = m*m ;
vec3 x = 2.0 * fract(p * C.www) - 1.0;
vec3 h = abs(x) - 0.5;
vec3 ox = floor(x + 0.5);
vec3 a0 = x - ox;
m *= 1.79284291400159 - 0.85373472095314 * ( a0*a0 + h*h );
vec3 g; g.x = a0.x * x0.x + h.x * x0.y; g.yz = a0.yz * x12.xz + h.yz * x12.yw;
return 130.0 * dot(m, g);
}
void main() {
vec2 uv = vUv;
float n = snoise(uv * 1.5 + uTime * 0.03);
vec3 color = mix(vec3(0.0, 0.0, 0.0), vec3(0.05, 0.02, 0.1), n * 0.5 + 0.5);
gl_FragColor = vec4(color, 1.0);
}
`;
|
| 269 |
+
// Fullscreen animated background: an orthographic camera spanning clip space
// renders a single 2x2 plane shaded by the noise fragment shader above.
const scene = new THREE.Scene();
const camera = new THREE.OrthographicCamera(-1, 1, 1, -1, 0, 1);
// NOTE(review): assumes an element with id="canvas3d" exists on this page —
// it is not visible in this chunk; confirm against the template's markup.
const renderer = new THREE.WebGLRenderer({ canvas: document.getElementById('canvas3d'), antialias: true });
renderer.setSize(window.innerWidth, window.innerHeight);
const geometry = new THREE.PlaneGeometry(2, 2);
const uniforms = { uTime: { value: 0 } };  // uTime (seconds) drives the noise drift
const material = new THREE.ShaderMaterial({ vertexShader, fragmentShader, uniforms });
scene.add(new THREE.Mesh(geometry, material));
// Render loop: `t` is the DOMHighResTimeStamp (ms) from requestAnimationFrame,
// converted to seconds for the shader.
const animate = (t) => {
    uniforms.uTime.value = t * 0.001;
    renderer.render(scene, camera);
    requestAnimationFrame(animate);
};
requestAnimationFrame(animate);
// Keep the canvas full-window; assigning window.onresize replaces any prior handler.
window.onresize = () => renderer.setSize(window.innerWidth, window.innerHeight);
|
| 284 |
+
</script>
|
| 285 |
+
</body>
|
| 286 |
+
|
| 287 |
+
</html>
|