import os
import random
from typing import Union

import albumentations as A
import cv2
import numpy as np
import pytorch_lightning as pl
import streamlit as st
import torch
from albumentations.pytorch import ToTensorV2

from . import configs


def generate_empty_space(total_space: int = 1) -> None:
    """Insert vertical whitespace into the Streamlit page."""
    for _ in range(total_space):
        st.write("")


def set_page_config(page_title: str, page_icon: str) -> None:
    """Configure the Streamlit page and render its title and author caption."""
    st.set_page_config(
        page_title=f"{configs.APP_NAME} - {page_title}",
        page_icon="🚀",
        layout="wide",
        initial_sidebar_state="collapsed",
    )
    st.title(f"{page_icon} {page_title}")
    st.caption(f"Created by: {configs.AUTHOR_NAME}")


def set_seed(seed: int = configs.RANDOM_SEED) -> None:
    """Seed every relevant RNG (PyTorch, CUDA, NumPy, Python, Lightning) for reproducibility."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # also covers torch.cuda.manual_seed on every device
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(seed)
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    pl.seed_everything(seed)


def euclidean_distance_normalized(x: np.ndarray, y: np.ndarray) -> float:
    """Map the Euclidean distance between two vectors to a similarity score in (0, 1]."""
    distance = np.linalg.norm(x - y)
    return 1 / (1 + distance)
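# For example (illustrative values), identical embeddings give distance 0 and
# hence the maximum similarity: euclidean_distance_normalized(np.ones(3), np.ones(3)) -> 1.0.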


def image_augmentations(image_size: int = configs.SIZE_IMAGES) -> A.Compose:
    """Build the resize -> normalize -> tensor preprocessing pipeline."""
    return A.Compose(
        [
            A.Resize(image_size, image_size),
            A.Normalize(
                mean=configs.NORMALIZE_IMAGE_MEAN, std=configs.NORMALIZE_IMAGE_STD
            ),
            ToTensorV2(),
        ]
    )
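# Usage sketch (hypothetical input): the composed pipeline is called with an
# `image=` keyword and returns a dict whose "image" entry is a CHW float tensor, e.g.
#     tensor = image_augmentations()(image=np.zeros((256, 256, 3), np.uint8))["image"]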


def normalize_image_to_zero_one(x: np.ndarray) -> np.ndarray:
    """Min-max scale an array to the [0, 1] range (assumes the array is not constant)."""
    return (x - np.min(x)) / (np.max(x) - np.min(x))


def reshape_transform(
    tensor: torch.Tensor, height: int = 14, width: int = 14
) -> torch.Tensor:
    """Reshape ViT token activations to a CNN-style feature map for Grad-CAM."""
    # Drop the class token, then lay the patch tokens out on the spatial grid.
    result = tensor[:, 1:, :].reshape(tensor.size(0), height, width, tensor.size(2))
    # Move channels to the second dimension: (B, H, W, C) -> (B, C, H, W).
    result = result.transpose(2, 3).transpose(1, 2)
    return result
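# For a ViT-B/16 on 224x224 inputs (an assumed configuration), the encoder emits
# 197 tokens (1 class token + 14 * 14 patches), so a (1, 197, 768) activation
# becomes a (1, 768, 14, 14) feature map.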


def get_device() -> torch.device:
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")


def get_default_images() -> tuple:
    """Collect the bundled example images, one directory per character class."""
    return_images = []
    class_characters = os.listdir(configs.DEFAULT_IMAGES_PATH)
    for cls in class_characters:
        images_characters = os.listdir(os.path.join(configs.DEFAULT_IMAGES_PATH, cls))
        for image in images_characters:
            return_images.append(
                os.path.join(configs.DEFAULT_IMAGES_PATH, cls, image).replace("\\", "/")
            )
    return tuple(return_images)


def check_data_type_variable(data, data_type) -> None:
    """Raise a TypeError when `data` is not an instance of `data_type`."""
    if not isinstance(data, data_type):
        raise TypeError(f"Data must be {data_type} type")


def get_most_salient_object(image: np.ndarray) -> np.ndarray:
    """Return a binary mask of the most salient region via OpenCV fine-grained saliency."""
    saliency = cv2.saliency.StaticSaliencyFineGrained_create()
    _, saliency_map = saliency.computeSaliency(image)
    saliency_map = (saliency_map * 255).astype("uint8")
    # Otsu's method picks the threshold separating salient pixels from background.
    thresh_map = cv2.threshold(
        saliency_map, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU
    )[1]
    return thresh_map


def margin_confidence(prob_dist: np.ndarray, is_sorted: bool = False) -> float:
    """Uncertainty as 1 minus the gap between the two most likely classes."""
    if not is_sorted:
        # Sort a copy in descending order so the caller's array is not mutated.
        prob_dist = np.sort(prob_dist)[::-1]
    difference = prob_dist[0] - prob_dist[1]
    return 1 - difference
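# For instance (illustrative values), margin_confidence(np.array([0.6, 0.3, 0.1]))
# yields 1 - (0.6 - 0.3) = 0.7; closer top-two probabilities push the score toward 1.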


def ratio_confidence(prob_dist: np.ndarray, is_sorted: bool = False) -> float:
    """Uncertainty as the ratio of the second most likely class to the most likely one."""
    if not is_sorted:
        prob_dist = np.sort(prob_dist)[::-1]
    return prob_dist[1] / prob_dist[0]


def least_confidence(prob_dist: np.ndarray, is_sorted: bool = False) -> float:
    """Uncertainty as the normalized complement of the top class probability."""
    if is_sorted:
        simple_least_conf = prob_dist[0]
    else:
        simple_least_conf = np.nanmax(prob_dist)
    num_labels = float(prob_dist.size)
    # Scale by n / (n - 1) so the score spans [0, 1] regardless of class count.
    return (1 - simple_least_conf) * (num_labels / (num_labels - 1))


def entropy_score(prob_dist: np.ndarray) -> float:
    """Uncertainty as Shannon entropy, normalized to [0, 1] by the maximum log2(n)."""
    # Zero probabilities produce NaN here; active_learning_uncertainty checks for that.
    log_probs = prob_dist * np.log2(prob_dist)
    raw_entropy = -np.sum(log_probs)
    return raw_entropy / np.log2(prob_dist.size)
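# A uniform distribution maximizes the score: entropy_score(np.array([0.25] * 4))
# gives 2.0 / log2(4) = 1.0 (illustrative values).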


def active_learning_uncertainty(prob_dist: np.ndarray) -> Union[float, bool]:
    """Average the four uncertainty scores, falling back to the valid subset when one is NaN.

    Returns False when even the margin confidence is NaN.
    """
    result_margin_confidence = margin_confidence(prob_dist)
    if np.isnan(result_margin_confidence).any():
        return False
    result_ratio_confidence = ratio_confidence(prob_dist)
    if np.isnan(result_ratio_confidence).any():
        return result_margin_confidence
    result_least_confidence = least_confidence(prob_dist)
    if np.isnan(result_least_confidence).any():
        return np.mean([result_margin_confidence, result_ratio_confidence])
    result_entropy_score = entropy_score(prob_dist)
    if np.isnan(result_entropy_score).any():
        return np.mean(
            [result_margin_confidence, result_ratio_confidence, result_least_confidence]
        )
    return np.mean(
        [
            result_margin_confidence,
            result_ratio_confidence,
            result_least_confidence,
            result_entropy_score,
        ]
    )
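

if __name__ == "__main__":
    # Minimal smoke test of the uncertainty helpers on a hypothetical 4-class
    # softmax output; a sketch only, not part of the app's entry points. Because
    # of the relative `configs` import, run it as a module, e.g.
    # `python -m <package>.<this_module>` with the package path filled in.
    probs = np.array([0.55, 0.25, 0.15, 0.05])
    print("margin confidence:", margin_confidence(probs))
    print("ratio confidence:", ratio_confidence(probs))
    print("least confidence:", least_confidence(probs))
    print("entropy score:", entropy_score(probs))
    print("combined uncertainty:", active_learning_uncertainty(probs))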