# Test-Space / main.py
import shutil
import cv2
import mediapipe as mp
from werkzeug.utils import secure_filename
import tensorflow as tf
import os
from flask import Flask, jsonify, request, flash, redirect, url_for
from pyngrok import ngrok
from fastapi import FastAPI, HTTPException, File, UploadFile, Request
from fastapi.staticfiles import StaticFiles
from fastapi.responses import JSONResponse
from pydantic import BaseModel
import subprocess
from hairstyle_recommendation import HairstyleRecommendation
import requests
# FastAPI application setup
app = FastAPI()
API_URL = "https://api-inference.huggingface.co/models/rizvandwiki/gender-classification-2"
headers = {"Authorization": "Bearer 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"}
public_url = "https://lambang0902-test-space.hf.space"
app.mount("/static", StaticFiles(directory="static"), name="static")
# Important variable declarations
filepath = ""
list_class = ['Diamond','Oblong','Oval','Round','Square','Triangle']
list_folder = ['Training', 'Testing']
recommendation = HairstyleRecommendation()
face_crop_img = True
face_landmark_img = True
landmark_extraction_img = True
#-----------------------------------------------------
#-----------------------------------------------------
# Model declarations and related objects
selected_model = tf.keras.models.load_model('models/fc_model_1.h5', compile=False)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
# #-----------------------------------------------------
# #-----------------------------------------------------
# Server settings
UPLOAD_FOLDER = './upload'
UPLOAD_MODEL = './models'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg','zip','h5'}
# app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# app.config['UPLOAD_MODEL'] = UPLOAD_MODEL
# app.config['MAX_CONTENT_LENGTH'] = 500 * 1024 * 1024 # 500 MB
# #-----------------------------------------------------
#
from file_processing import FileProcess
from get_load_data import GetLoadData
from data_preprocess import DataProcessing
from train_pred import TrainPred
#-----------------------------------------------------
data_processor = DataProcessing()
data_train_pred = TrainPred()
def get_gender(filename):
    # Send the raw image bytes to the gender-classification inference endpoint
    with open(filename, "rb") as f:
        data = f.read()
    response = requests.post(API_URL, headers=headers, data=data)
    return response.json()
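# Illustrative response shape, as consumed in /get_images below (exact labels and
# scores are an assumption): [{"label": "male", "score": 0.98}, {"label": "female", "score": 0.02}]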
import random
def preprocessing(filepath):
    # Reset the temporary output folder (ignore_errors avoids failing if it is missing)
    folder_path = './static/temporary'
    shutil.rmtree(folder_path, ignore_errors=True)
    os.makedirs(folder_path, exist_ok=True)
    data_processor.detect_landmark(data_processor.face_cropping_pred(filepath))
    # data_processor.enhance_contrast_histeq(data_processor.face_cropping_pred(filepath))
    # Rename the generated files with an index and a random suffix
    files = os.listdir(folder_path)
    index = 0
    for file_name in files:
        file_ext = os.path.splitext(file_name)[1]
        new_file_name = str(index) + "_" + str(random.randint(1, 100000)) + file_ext
        os.rename(os.path.join(folder_path, file_name), os.path.join(folder_path, new_file_name))
        index += 1
    print("Wait until preprocessing is finished")
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.)
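# Both generators only rescale pixel intensities from [0, 255] to [0, 1]; no augmentation is configured here.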
#-----------------------------------------------------
# Function to launch ngrok
def run_ngrok():
    try:
        # Launch ngrok and keep a handle on the process
        ngrok_process = subprocess.Popen(['ngrok', 'http', '8000'])
        return ngrok_process
    except Exception as e:
        print(f"Error running ngrok: {e}")
@app.get("/")
async def root():
# Dapatkan URL publik dari ngrok
return {"message": "Server berfungsi ya ges ya"}
# -------------------------------------------------------------------------
# API FOR THE PREDICTION PROCESS
# -------------------------------------------------------------------------
# Use a pipeline as a high-level helper
# from transformers import pipeline
# pipe = pipeline("image-classification", model="rizvandwiki/gender-classification-2")
@app.post('/upload/file', tags=["Predicting"])
async def upload_file(picture: UploadFile):
    file_extension = picture.filename.split('.')[-1].lower()
    if file_extension not in ALLOWED_EXTENSIONS:
        raise HTTPException(status_code=400, detail='Invalid file extension')
    os.makedirs(UPLOAD_FOLDER, exist_ok=True)
    file_path = os.path.join(UPLOAD_FOLDER, secure_filename(picture.filename))
    with open(file_path, 'wb') as f:
        f.write(picture.file.read())
    try:
        preprocessing(cv2.imread(file_path))
    except Exception as e:
        os.remove(file_path)
        raise HTTPException(status_code=500, detail=f'Error processing image: {str(e)}')
    return JSONResponse(content={'message': 'File successfully uploaded'}, status_code=200)
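# Example client call (illustrative; assumes the API is served locally on port 8080):
#   curl -X POST -F "picture=@face.jpg" http://localhost:8080/upload/file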
@app.get('/get_images', tags=["Predicting"])
def get_images():
    folder_path = "./static/temporary"
    files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
    urls = []
    # URLs of the first four preprocessed images
    for i in range(0, 4):
        url = f'{public_url}/static/temporary/{files[i]}'
        urls.append(url)
    # Face shape classification
    bentuk, persentase = data_train_pred.prediction(selected_model)
    # Gender classification
    gender_classify = get_gender('./static/result_upload0.jpg')
    output_gender = max(gender_classify, key=lambda x: x['score'])['label']
    print(output_gender)
    # Hairstyle recommendation
    recommended_styles, style_images, hairstyle_description = recommendation.get_recommendation(output_gender, bentuk[0])
    hairstyleImage = [f'{public_url}/static/hairstyle_image/{file}' for file in style_images]
    response = {'urls': urls,
                'bentuk_wajah': bentuk[0],
                'persen': persentase,
                'gender': output_gender,
                'hair_style': recommended_styles,
                'hair_image': hairstyleImage,
                'hair_description': hairstyle_description}
    return response
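# Illustrative response shape (field values are an assumption):
#   {"urls": [...], "bentuk_wajah": "Oval", "persen": ..., "gender": "male",
#    "hair_style": [...], "hair_image": [...], "hair_description": "..."}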
# -------------------------------------------------------------------------
# API FOR THE TRAINING PROCESS
# -------------------------------------------------------------------------
# Pydantic model for request-body validation
# class TrainingParams(BaseModel):
# optimizer: str
# epoch: int
# batchSize: int
# @app.post('/upload/dataset', tags=["Training"])
# async def upload_data(dataset: UploadFile):
# if dataset.filename == '':
# raise HTTPException(status_code=400, detail='No file selected for uploading')
# # Build the full path where the file will be saved
# file_path = os.path.join(UPLOAD_FOLDER, dataset.filename)
# # Save the file to the designated folder
# with open(file_path, "wb") as file_object:
# file_object.write(dataset.file.read())
# # Call the extraction helper if the file needs to be unzipped
# FileProcess.extract_zip(file_path)
# return {'message': 'File successfully uploaded'}
# @app.post('/set_params', tags=["Training"])
# async def set_params(request: Request, params: TrainingParams):
# global optimizer, epoch, batch_size
# optimizer = params.optimizer
# epoch = params.epoch
# batch_size = params.batchSize
# response = {'message': 'Parameters set successfully'}
# return response
# @app.get('/get_info_data', tags=["Training"])
# def get_info_prepro():
# global optimizer, epoch, batch_size
# training_counts = GetLoadData.get_training_file_counts().json
# testing_counts = GetLoadData.get_testing_file_counts().json
# response = {
# "optimizer": optimizer,
# "epoch": epoch,
# "batch_size": batch_size,
# "training_counts": training_counts,
# "testing_counts": testing_counts
# }
# return response
# @app.get('/get_images_preprocess', tags=["Training"])
# def get_random_images_crop():
# images_face_landmark = GetLoadData.get_random_images(tahap="Face Landmark",public_url=public_url)
# images_face_extraction = GetLoadData.get_random_images(tahap="landmark Extraction", public_url=public_url)
# response = {
# "face_landmark": images_face_landmark,
# "landmark_extraction": images_face_extraction
# }
# return response
# @app.get('/do_preprocessing', tags=["Training"])
# async def do_preprocessing():
# try:
# data_train_pred.do_pre1(test="")
# data_train_pred.do_pre2(test="")
# return {'message': 'Preprocessing successful'}
# except Exception as e:
# # Handle the error and return an error response
# error_message = f'Error during preprocessing: {str(e)}'
# raise HTTPException(status_code=500, detail=error_message)
# @app.get('/do_training', tags=["Training"])
# def do_training():
# global epoch
# folder = ""
# if (face_landmark_img == True and landmark_extraction_img == True):
# folder = "Landmark Extraction"
# elif (face_landmark_img == True and landmark_extraction_img == False):
# folder = "Face Landmark"
# # --------------------------------------------------------------
# train_dataset_path = f"./static/dataset/{folder}/Training/"
# test_dataset_path = f"./static/dataset/{folder}/Testing/"
# train_image_df, test_image_df = GetLoadData.load_image_dataset(train_dataset_path, test_dataset_path)
# train_gen, test_gen = data_train_pred.data_configuration(train_image_df, test_image_df)
# model = data_train_pred.model_architecture()
# result = data_train_pred.train_model(model, train_gen, test_gen, epoch)
# # Take the final training and validation accuracy values from the result object
# train_acc = result.history['accuracy'][-1]
# val_acc = result.history['val_accuracy'][-1]
# # Plot accuracy
# data_train_pred.plot_accuracy(result=result, epoch=epoch)
# acc_url = f'{public_url}/static/accuracy_plot.png'
# # Plot loss
# data_train_pred.plot_loss(result=result, epoch=epoch)
# loss_url = f'{public_url}/static/loss_plot.png'
# # Confusion Matrix
# data_train_pred.plot_confusion_matrix(model, test_gen)
# conf_url = f'{public_url}/static/confusion_matrix.png'
# return jsonify({'train_acc': train_acc, 'val_acc': val_acc, 'plot_acc': acc_url, 'plot_loss':loss_url,'conf':conf_url})
# -------------------------------------------------------------------------
# API FOR MODEL SELECTION
# -------------------------------------------------------------------------
# @app.post('/upload/model', tags=["Model"])
# def upload_model():
# if 'file' not in request.files:
# return {'message': 'No file part in the request'}, 400
# file = request.files['file']
# if file.filename == '':
# return {'message': 'No file selected for uploading'}, 400
# if file and FileProcess.allowed_file(file.filename):
# filename = secure_filename(file.filename)
# filepath = os.path.join(app.config['UPLOAD_MODEL'], filename)
# file.save(filepath)
# return {'message': 'File successfully uploaded'}
# return {'message': 'File failed to upload'}
# @app.post('/selected_models')
# def select_models(index: int):
# global selected_model
# try:
# global selected_model
# selected_model = tf.keras.models.load_model(f'models/fc_model_{index}.h5')
# # Do something with the received index
# return {'message': 'Request received successfully'}
# except Exception as e:
# raise HTTPException(status_code=500, detail=f'Error: {str(e)}')
if __name__ == '__main__':
    import uvicorn
    # Open an ngrok tunnel to the local server and report the public URL
    public_url = ngrok.connect(8080).public_url
    print(f' * Running on {public_url}')
    uvicorn.run(app, host="0.0.0.0", port=8080)
# app = FastAPI()