Dataset columns:
repo: string (length 2-99)
file: string (length 14-239)
code: string (length 20-3.99M)
file_length: int64 (range 20-3.99M)
avg_line_length: float64 (range 9.73-128)
max_line_length: int64 (range 11-86.4k)
extension_type: string (1 class: py)
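For orientation, a small illustrative sketch of how the three per-file statistics could be derived from a raw code string. This is an assumption about the recipe, not the dataset's documented pipeline, and the helper name file_stats is made up.

import statistics

def file_stats(code: str):
    # file_length: total character count of the file contents
    # avg_line_length / max_line_length: computed over individual lines
    lines = code.splitlines()
    file_length = len(code)
    avg_line_length = sum(len(l) for l in lines) / max(len(lines), 1)
    max_line_length = max((len(l) for l in lines), default=0)
    return file_length, avg_line_length, max_line_length

print(file_stats("import os\nprint(os.getcwd())\n"))  # (30, 14.0, 19)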
repo: vadesc
file: vadesc-main/utils/radiomics_utils.py
code:
""" Utility functions for extracting radiomics features. """ import os import shutil import numpy as np import cv2 import logging import progressbar from radiomics import featureextractor def extract_radiomics_features(data_file, masks, verbose=1): # Set logging for the radiomics library logger = logging.getLogger("radiomics") logger.setLevel(logging.ERROR) # Load images and segmentation masks images = np.load(file=data_file, allow_pickle=True) print(images.shape, masks.shape) assert images.shape == masks.shape # Create a temporary directory for images and masks if os.path.exists('./radiomics_features_temp'): shutil.rmtree('./radiomics_features_temp') else: os.makedirs('./radiomics_features_temp') n_images = images.shape[0] if verbose: print('Extracting radiomics features...') bar = progressbar.ProgressBar(maxval=n_images) bar.start() # Feature extraction by PyRadiomics extractor = featureextractor.RadiomicsFeatureExtractor() extractor.enableAllFeatures() radiomics_features = None for i in range(n_images): # Create a directory for each image os.makedirs('./radiomics_features_temp/' + str(i)) imageName = './radiomics_features_temp/' + str(i) + '/image.png' maskName = './radiomics_features_temp/' + str(i) + '/mask.png' cv2.imwrite(filename=imageName, img=images[i, 0]) cv2.imwrite(filename=maskName, img=masks[i, 0]) # Provide mask and image files to the extractor result = extractor.execute(imageFilepath=imageName, maskFilepath=maskName) result_features = [val for key, val in result.items() if 'original_' in key and 'diagnostics_' not in key] result_features = [float(r) for r in result_features] if radiomics_features is None: radiomics_features = np.zeros((n_images, len(result_features))) radiomics_features[i] = result_features if verbose > 0: bar.update(i) shutil.rmtree('./radiomics_features_temp') return radiomics_features
file_length: 2,116 | avg_line_length: 29.242857 | max_line_length: 114 | extension_type: py
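A minimal usage sketch for extract_radiomics_features above. The .npy file name, array shapes, and the import path are assumptions for illustration, and PyRadiomics settings may need adjusting for 2D PNG inputs.

import numpy as np
from radiomics_utils import extract_radiomics_features  # assumed import path

# Hypothetical data: 4 single-channel 64x64 images with matching binary masks,
# matching the (N, 1, H, W) indexing used by images[i, 0] / masks[i, 0] above.
images = (np.random.rand(4, 1, 64, 64) * 255).astype(np.uint8)
masks = np.zeros_like(images)
masks[:, :, 16:48, 16:48] = 1  # square foreground region labelled 1
np.save("images.npy", images)

# One row of "original_*" PyRadiomics features per image.
features = extract_radiomics_features(data_file="images.npy", masks=masks, verbose=1)
print(features.shape)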
repo: vadesc
file: vadesc-main/utils/eval_utils.py
code:
""" Utility functions for model evaluation. """ import numpy as np from lifelines.utils import concordance_index import sys from sklearn.utils.linear_assignment_ import linear_assignment from sklearn.metrics.cluster import normalized_mutual_info_score import tensorflow as tf from lifelines import KaplanMeierFitter from scipy import stats from scipy.stats import linregress sys.path.insert(0, '../') def accuracy_metric(inp, p_c_z): y = inp[:, 2] y_pred = tf.math.argmax(p_c_z, axis=-1) return tf.numpy_function(normalized_mutual_info_score, [y, y_pred], tf.float64) def cindex_metric(inp, risk_scores): # Evaluates the concordance index based on provided predicted risk scores, computed using hard clustering # assignments. t = inp[:, 0] d = inp[:, 1] risk_scores = tf.squeeze(risk_scores) return tf.cond(tf.reduce_any(tf.math.is_nan(risk_scores)), lambda: tf.numpy_function(cindex, [t, d, tf.zeros_like(risk_scores)], tf.float64), lambda: tf.numpy_function(cindex, [t, d, risk_scores], tf.float64)) def cindex(t: np.ndarray, d: np.ndarray, scores_pred: np.ndarray): """ Evaluates concordance index based on the given predicted risk scores. :param t: observed time-to-event. :param d: labels of the type of even observed. d[i] == 1, if the i-th event is failure (death); d[i] == 0 otherwise. :param scores_pred: predicted risk/hazard scores. :return: return the concordance index. """ try: ci = concordance_index(event_times=t, event_observed=d, predicted_scores=scores_pred) except ZeroDivisionError: print('Cannot devide by zero.') ci = float(0.5) return ci def rae(t_pred, t_true, cens_t): # Relative absolute error as implemented by Chapfuwa et al. abs_error_i = np.abs(t_pred - t_true) pred_great_empirical = t_pred > t_true min_rea_i = np.minimum(np.divide(abs_error_i, t_true + 1e-8), 1.0) idx_cond = np.logical_and(cens_t, pred_great_empirical) min_rea_i[idx_cond] = 0.0 return np.sum(min_rea_i) / len(t_true) def calibration(predicted_samples, t, d): kmf = KaplanMeierFitter() kmf.fit(t, event_observed=d) range_quant = np.arange(start=0, stop=1.010, step=0.010) t_empirical_range = np.unique(np.sort(np.append(t, [0]))) km_pred_alive_prob = [kmf.predict(i) for i in t_empirical_range] empirical_dead = 1 - np.array(km_pred_alive_prob) km_dead_dist, km_var_dist, km_dist_ci = compute_km_dist(predicted_samples, t_empirical_range=t_empirical_range, event=d) slope, intercept, r_value, p_value, std_err = linregress(x=km_dead_dist, y=empirical_dead) return slope # Bounds def ci_bounds(surv_t, cumulative_sq_, alpha=0.95): # print("surv_t: ", surv_t, "cumulative_sq_: ", cumulative_sq_) # This method calculates confidence intervals using the exponential Greenwood formula. # See https://www.math.wustl.edu/%7Esawyer/handouts/greenwood.pdf # alpha = 0.95 if surv_t > 0.999: surv_t = 1 cumulative_sq_ = 0 alpha = 0.95 constant = 1e-8 alpha2 = stats.norm.ppf((1. + alpha) / 2.) 
v = np.log(surv_t) left_ci = np.log(-v) right_ci = alpha2 * np.sqrt(cumulative_sq_) * 1 / v c_plus = left_ci + right_ci c_neg = left_ci - right_ci ci_lower = np.exp(-np.exp(c_plus)) ci_upper = np.exp(-np.exp(c_neg)) return [ci_lower, ci_upper] # Population wise cdf def compute_km_dist(predicted_samples, t_empirical_range, event): km_dead = [] km_surv = 1 km_var = [] km_ci = [] km_sum = 0 kernel = [] e_event = event for j in np.arange(len(t_empirical_range)): r = t_empirical_range[j] low = 0 if j == 0 else t_empirical_range[j - 1] area = 0 censored = 0 dead = 0 at_risk = len(predicted_samples) count_death = 0 for i in np.arange(len(predicted_samples)): e = e_event[i] if len(kernel) != len(predicted_samples): kernel_i = stats.gaussian_kde(predicted_samples[i]) kernel.append(kernel_i) else: kernel_i = kernel[i] at_risk = at_risk - kernel_i.integrate_box_1d(low=0, high=low) if e == 1: count_death += kernel_i.integrate_box_1d(low=low, high=r) if at_risk == 0: break km_int_surv = 1 - count_death / at_risk km_int_sum = count_death / (at_risk * (at_risk - count_death)) km_surv = km_surv * km_int_surv km_sum = km_sum + km_int_sum km_ci.append(ci_bounds(cumulative_sq_=km_sum, surv_t=km_surv)) km_dead.append(1 - km_surv) km_var.append(km_surv * km_surv * km_sum) return np.array(km_dead), np.array(km_var), np.array(km_ci) def cluster_acc(y_true, y_pred): """ Calculate clustering accuracy. # Arguments y: true labels, numpy.array with shape `(n_samples,)` y_pred: predicted labels, numpy.array with shape `(n_samples,)` # Return accuracy, in [0,1] """ y_true = y_true.astype(np.int64) assert y_pred.size == y_true.size D = max(y_pred.astype(int).max(), y_true.astype(int).max()) + 1 w = np.zeros((int(D), (D)), dtype=np.int64) for i in range(y_pred.size): w[int(y_pred[i]), int(y_true[i])] += 1 ind = linear_assignment(w.max() - w) return sum([w[i, j] for i, j in ind]) * 1.0 / y_pred.size
file_length: 5,546 | avg_line_length: 31.063584 | max_line_length: 120 | extension_type: py
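Two small, self-contained checks of the metrics implemented above, on made-up arrays. The cluster-accuracy matching step is reproduced with scipy's linear_sum_assignment, since cluster_acc itself relies on the older sklearn.utils.linear_assignment_ wrapper.

import numpy as np
from scipy.optimize import linear_sum_assignment
from lifelines.utils import concordance_index

# Concordance index on toy survival data (cindex above forwards to this call
# and falls back to 0.5 on a ZeroDivisionError).
t = np.array([5.0, 8.0, 3.0, 12.0, 7.0])      # observed times
d = np.array([1, 0, 1, 1, 0])                 # 1 = event observed
scores = np.array([0.2, 0.6, 0.1, 0.9, 0.5])  # predicted scores
print(concordance_index(event_times=t, event_observed=d, predicted_scores=scores))

# cluster_acc matches predicted to true labels before scoring, so a consistent
# relabelling of the same partition still counts as perfect accuracy.
y_true = np.array([0, 0, 1, 1, 2, 2])
y_pred = np.array([2, 2, 0, 0, 1, 1])
D = max(y_pred.max(), y_true.max()) + 1
w = np.zeros((D, D), dtype=np.int64)
for i in range(y_pred.size):
    w[y_pred[i], y_true[i]] += 1
row, col = linear_sum_assignment(w.max() - w)
print(w[row, col].sum() / y_pred.size)  # 1.0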
repo: vadesc
file: vadesc-main/posthoc_explanations/explainer_utils.py
code:
import pandas as pd import numpy as np from sklearn.preprocessing import MinMaxScaler, StandardScaler import keras import math import seaborn as sns import matplotlib.pyplot as plt import matplotlib def Prototypes_sampler(cluster, X, pcz, sample_size, p_threshold): #X = pd.DataFrame(X) # Function to extract prototypes from X assigned to cluster c with high probability (>= pcz_threshold) High_p_c_df = pd.DataFrame(pcz.loc[(pcz.iloc[:,cluster] > p_threshold), cluster]) # make sure we sample always the same prototypes for each cluster np.random.seed(seed=42) # Check if there are enough observations with high probability to sample for the given cluster if len(High_p_c_df) <= sample_size: id_X = High_p_c_df.index else: id_X = High_p_c_df.sample(n=sample_size).index Prototypes_c = X.iloc[id_X] return Prototypes_c, id_X def extract_prototypes_list(X, clusters_labs, pcz, n_prototypes, p_threshold): proto_id_list = [] for cluster in clusters_labs: df, proto_id = Prototypes_sampler(cluster, X, pcz, sample_size = n_prototypes, p_threshold = p_threshold) proto_id_list.append(proto_id) return proto_id_list def build_prototypes_ds(X, num_clusters, proto_id_list): Prototypes_ds = pd.DataFrame() proto_labels = [] for i in range(0,num_clusters): df = X.iloc[proto_id_list[i],:] lab = np.full((np.shape(df)[0],), i) Prototypes_ds = pd.concat([Prototypes_ds, df], axis=0) proto_labels = np.append(proto_labels, lab) return Prototypes_ds, proto_labels def import_hemo_covnames(): cov_names = ['ageStart', 'myspKtV', 'myektv', 'UFR_mLkgh', 'zwtpost', 'CharlsonScore', 'diabetes', 'cardiovascular', 'ctd', 'mean_albumin', 'mean_nPCR', 'mean_ldh', 'mean_creatinine', 'mean_hematocrit', 'mean_iron', 'mean_neutrophils', 'mean_lymphocytes', 'mean_rdw', 'mean_rbc', 'mean_ag_ratio', 'mean_caxphos_c', 'mean_hemoglobin', 'mean_pth', 'mean_uf', 'mean_uf_percent', 'mean_idwg_day', 'mean_preSBP', 'mean_postSBP', 'mean_lowestSBP', 'TBWchild', 'TBWadult', 'BSA', 'cTargetDryWeightKg', 'WeightPostKg', 'spktv_cheek_BSA', 'spktv_cheek_W067', 'spktv_cheek_W075', 'spktv_watson_BSA', 'spktv_watson_W067', 'spktv_watson_W075', 'tidwg2', 'tuf_percent', 'PatientGender_F', 'PatientRace4_African', 'PatientRace4_Caucasian', 'PatientRace4_Hispanic', 'USRDS_class_Cystic/hereditary/congenital diseases', 'USRDS_class_Diabetes', 'USRDS_class_Glomerulonephritis', 'USRDS_class_Hypertensive/large vessel disease', 'USRDS_class_Interstitial nephritis/pyelonephritis', 'USRDS_class_Miscellaneous conditions ', 'USRDS_class_Neoplasms/tumors', 'USRDS_class_Secondary glomerulonephritis/vasculitis', 'fspktv4_(1.39,1.56]', 'fspktv4_(1.56,1.73]', 'fspktv4_(1.73,3.63]', 'fspktv4_[0.784,1.39]'] return cov_names def HemoData_preparation(X): cov_names = import_hemo_covnames() X = pd.DataFrame(X) X.columns = cov_names cov_to_eliminate = ['UFR_mLkgh', 'mean_uf', 'mean_idwg_day', 'mean_postSBP', 'mean_lowestSBP', 'TBWchild', 'TBWadult', 'spktv_watson_W067', 'spktv_watson_W075', 'spktv_watson_BSA', 'spktv_cheek_BSA', 'spktv_cheek_W075', 'tidwg2', 'tuf_percent', 'fspktv4_(1.39,1.56]', 'fspktv4_(1.56,1.73]', 'fspktv4_(1.73,3.63]', 'fspktv4_[0.784,1.39]'] X = X.drop(cov_to_eliminate, axis=1) cov_names = X.columns.values return X.values, cov_names def prepare_summary_plot_data(global_shaps, top_n, prototypes_ds_original, cluster_labels, feature_names): most_rel_shaps_ds = global_shaps.nlargest(top_n) # We extract the id of the most relevant features to retrieve the columns from the raw input data. 
# This passage is needed to plot the original features distribution in the two clusters of prototypes. id_most_rel = most_rel_shaps_ds.index Proto_mostRel_f_ds = prototypes_ds_original.iloc[:,id_most_rel] Plot_df = pd.concat([Proto_mostRel_f_ds, pd.DataFrame(cluster_labels, columns=["c"])], axis=1) top_feature_names = feature_names[id_most_rel] shap_bar_values = most_rel_shaps_ds.tolist() return top_feature_names, shap_bar_values, Plot_df def plot_topN_features(Plot_df, top_n, top_feature_names, shap_bar_values, unit_measures): CB_COLOR_CYCLE = ['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3', '#999999', '#e41a1c', '#dede00'] number_gp = top_n def ax_settings(ax, var_name, unit_measure): ax.set_yticks([]) ax.spines['left'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_edgecolor('#444444') ax.spines['bottom'].set_linewidth(2) ax.set_xlabel(unit_measure, fontsize=16) ax.tick_params(axis='x', labelsize=14) #ax.set_xticklabels(ax.get_xticklabels(), fontsize=4) ax.text(-0.2, 0.1, var_name, fontsize=17, transform = ax.transAxes) return None # Manipulate each axes object in the left. fig = plt.figure(figsize=(18,21)) gs = matplotlib.gridspec.GridSpec(nrows=number_gp, ncols=2, figure=fig, width_ratios= [3, 1], height_ratios= [1]*number_gp, wspace=0.05, hspace=0.6 ) ax = [None]*(number_gp) # Create a figure, partition the figure into boxes, set up an ax array to store axes objects, and create a list of features. for i in range(number_gp): ax[i] = fig.add_subplot(gs[i, 0]) ax_settings(ax[i], str(top_feature_names[i]), str(unit_measures[i])) sns.histplot(data=Plot_df[(Plot_df['c'] == 0)].iloc[:,i], ax=ax[i], stat = 'density', color=CB_COLOR_CYCLE[1], legend=False, alpha=0.6, linewidth=0.1) sns.histplot(data=Plot_df[(Plot_df['c'] == 1)].iloc[:,i], ax=ax[i], stat = 'density', color=CB_COLOR_CYCLE[0], legend=False, alpha=0.6, linewidth=0.1) #if i < (number_gp - 1): # ax[i].set_xticks([]) if i == (number_gp-1): ax[i].text(0.2, -1, 'Covariates Distribution across Clusters', fontsize=18, transform = ax[i].transAxes) ax[0].legend(['Cluster 1', 'Cluster 2'], facecolor='w', loc='upper left', fontsize=15) for i in range(number_gp): ax[i] = fig.add_subplot(gs[i, 1]) ax[i].spines['right'].set_visible(False) ax[i].spines['top'].set_visible(False) ax[i].barh(0, shap_bar_values[i], color=CB_COLOR_CYCLE[-3], height=0.8, align = 'center') ax[i].set_xlim(0 , 0.015) ax[i].set_yticks([]) ax[i].set_ylim(-1,1) if i < (number_gp - 1): ax[i].set_xticks([]) ax[i].spines['bottom'].set_visible(False) if i == (number_gp-1): ax[i].spines['bottom'].set_visible(True) ax[i].tick_params(axis='x', labelrotation= 45, labelsize=13) ax[i].text(-0.01, -1, 'Mean(|Shapley Value|)', fontsize=18, transform = ax[i].transAxes) return fig
file_length: 7,993 | avg_line_length: 32.033058 | max_line_length: 158 | extension_type: py
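A minimal sketch of the prototype-extraction flow above on synthetic data; the feature matrix, the soft assignment probabilities pcz, and the import path are all made up for illustration.

import numpy as np
import pandas as pd
from explainer_utils import extract_prototypes_list, build_prototypes_ds  # assumed import path

rng = np.random.default_rng(42)
X = pd.DataFrame(rng.normal(size=(100, 5)), columns=[f"x{i}" for i in range(5)])

# Soft cluster assignments p(c|z) for two clusters; columns are labelled 0 and 1
# because Prototypes_sampler indexes pcz by the cluster label.
p = rng.uniform(size=(100, 1))
pcz = pd.DataFrame(np.hstack([p, 1 - p]), columns=[0, 1])

# Up to 10 prototypes per cluster, drawn from points assigned with probability > 0.8.
proto_ids = extract_prototypes_list(X, clusters_labs=[0, 1], pcz=pcz,
                                    n_prototypes=10, p_threshold=0.8)
prototypes, proto_labels = build_prototypes_ds(X, num_clusters=2, proto_id_list=proto_ids)
print(prototypes.shape, np.bincount(proto_labels.astype(int)))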
repo: sdmgrad
file: sdmgrad-main/toy/toy.py
code:
from copy import deepcopy from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm, ticker from matplotlib.colors import LogNorm from tqdm import tqdm from scipy.optimize import minimize, Bounds, minimize_scalar import matplotlib.pyplot as plt import numpy as np import time import torch import torch.nn as nn from torch.optim.lr_scheduler import ExponentialLR import seaborn as sns import sys # Define the Optimization Problem LOWER = 0.000005 class Toy(nn.Module): def __init__(self): super(Toy, self).__init__() self.centers = torch.Tensor([[-3.0, 0], [3.0, 0]]) def forward(self, x, compute_grad=False): x1 = x[0] x2 = x[1] f1 = torch.clamp((0.5 * (-x1 - 7) - torch.tanh(-x2)).abs(), LOWER).log() + 6 f2 = torch.clamp((0.5 * (-x1 + 3) + torch.tanh(-x2) + 2).abs(), LOWER).log() + 6 c1 = torch.clamp(torch.tanh(x2 * 0.5), 0) f1_sq = ((-x1 + 7).pow(2) + 0.1 * (-x2 - 8).pow(2)) / 10 - 20 f2_sq = ((-x1 - 7).pow(2) + 0.1 * (-x2 - 8).pow(2)) / 10 - 20 c2 = torch.clamp(torch.tanh(-x2 * 0.5), 0) f1 = f1 * c1 + f1_sq * c2 f2 = f2 * c1 + f2_sq * c2 f = torch.tensor([f1, f2]) if compute_grad: g11 = torch.autograd.grad(f1, x1, retain_graph=True)[0].item() g12 = torch.autograd.grad(f1, x2, retain_graph=True)[0].item() g21 = torch.autograd.grad(f2, x1, retain_graph=True)[0].item() g22 = torch.autograd.grad(f2, x2, retain_graph=True)[0].item() g = torch.Tensor([[g11, g21], [g12, g22]]) return f, g else: return f def batch_forward(self, x): x1 = x[:, 0] x2 = x[:, 1] f1 = torch.clamp((0.5 * (-x1 - 7) - torch.tanh(-x2)).abs(), LOWER).log() + 6 f2 = torch.clamp((0.5 * (-x1 + 3) + torch.tanh(-x2) + 2).abs(), LOWER).log() + 6 c1 = torch.clamp(torch.tanh(x2 * 0.5), 0) f1_sq = ((-x1 + 7).pow(2) + 0.1 * (-x2 - 8).pow(2)) / 10 - 20 f2_sq = ((-x1 - 7).pow(2) + 0.1 * (-x2 - 8).pow(2)) / 10 - 20 c2 = torch.clamp(torch.tanh(-x2 * 0.5), 0) f1 = f1 * c1 + f1_sq * c2 f2 = f2 * c1 + f2_sq * c2 f = torch.cat([f1.view(-1, 1), f2.view(-1, 1)], -1) return f # Plot Utils def plotme(F, all_traj=None, xl=11): n = 500 x = np.linspace(-xl, xl, n) y = np.linspace(-xl, xl, n) X, Y = np.meshgrid(x, y) Xs = torch.Tensor(np.transpose(np.array([list(X.flat), list(Y.flat)]))).double() Ys = F.batch_forward(Xs) colormaps = { "sgd": "tab:blue", "pcgrad": "tab:orange", "mgd": "tab:cyan", "cagrad": "tab:red", "sdmgrad": "tab:green" } plt.figure(figsize=(12, 5)) plt.subplot(131) c = plt.contour(X, Y, Ys[:, 0].view(n, n)) if all_traj is not None: for i, (k, v) in enumerate(all_traj.items()): plt.plot(all_traj[k][:, 0], all_traj[k][:, 1], '--', c=colormaps[k], label=k) plt.title("L1(x)") plt.subplot(132) c = plt.contour(X, Y, Ys[:, 1].view(n, n)) if all_traj is not None: for i, (k, v) in enumerate(all_traj.items()): plt.plot(all_traj[k][:, 0], all_traj[k][:, 1], '--', c=colormaps[k], label=k) plt.title("L2(x)") plt.subplot(133) c = plt.contour(X, Y, Ys.mean(1).view(n, n)) if all_traj is not None: for i, (k, v) in enumerate(all_traj.items()): plt.plot(all_traj[k][:, 0], all_traj[k][:, 1], '--', c=colormaps[k], label=k) plt.legend() plt.title("0.5*(L1(x)+L2(x))") plt.tight_layout() plt.savefig(f"toy_ct.png") def plot3d(F, xl=11): n = 500 x = np.linspace(-xl, xl, n) y = np.linspace(-xl, xl, n) X, Y = np.meshgrid(x, y) Xs = torch.Tensor(np.transpose(np.array([list(X.flat), list(Y.flat)]))).double() Ys = F.batch_forward(Xs) fig, ax = plt.subplots(subplot_kw={"projection": "3d"}) ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax.grid(False) Yv = 
Ys.mean(1).view(n, n) surf = ax.plot_surface(X, Y, Yv.numpy(), cmap=cm.viridis) print(Ys.mean(1).min(), Ys.mean(1).max()) ax.set_zticks([-16, -8, 0, 8]) ax.set_zlim(-20, 10) ax.set_xticks([-10, 0, 10]) ax.set_yticks([-10, 0, 10]) for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(15) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(15) for tick in ax.zaxis.get_major_ticks(): tick.label.set_fontsize(15) ax.view_init(25) plt.tight_layout() plt.savefig(f"3d-obj.png", dpi=1000) def plot_contour(F, task=1, traj=None, xl=11, plotbar=False, name="tmp"): n = 500 x = np.linspace(-xl, xl, n) y = np.linspace(-xl, xl, n) X, Y = np.meshgrid(x, y) fig = plt.figure() ax = fig.add_subplot(111) Xs = torch.Tensor(np.transpose(np.array([list(X.flat), list(Y.flat)]))).double() Ys = F.batch_forward(Xs) cmap = cm.get_cmap('viridis') yy = -8.3552 if task == 0: Yv = Ys.mean(1) plt.plot(-8.5, 7.5, marker='o', markersize=10, zorder=5, color='k') plt.plot(-8.5, -5, marker='o', markersize=10, zorder=5, color='k') plt.plot(9, 9, marker='o', markersize=10, zorder=5, color='k') plt.plot([-7, 7], [yy, yy], linewidth=8.0, zorder=0, color='gray') plt.plot(0, yy, marker='*', markersize=15, zorder=5, color='k') elif task == 1: Yv = Ys[:, 0] plt.plot(7, yy, marker='*', markersize=15, zorder=5, color='k') else: Yv = Ys[:, 1] plt.plot(-7, yy, marker='*', markersize=15, zorder=5, color='k') c = plt.contour(X, Y, Yv.view(n, n), cmap=cm.viridis, linewidths=4.0) if traj is not None: for tt in traj: l = tt.shape[0] color_list = np.zeros((l, 3)) color_list[:, 0] = 1. color_list[:, 1] = np.linspace(0, 1, l) #color_list[:,2] = 1-np.linspace(0, 1, l) ax.scatter(tt[:, 0], tt[:, 1], color=color_list, s=6, zorder=10) if plotbar: cbar = fig.colorbar(c, ticks=[-15, -10, -5, 0, 5]) cbar.ax.tick_params(labelsize=15) ax.set_aspect(1.0 / ax.get_data_ratio(), adjustable='box') plt.xticks([-10, -5, 0, 5, 10], fontsize=15) plt.yticks([-10, -5, 0, 5, 10], fontsize=15) plt.tight_layout() plt.savefig(f"{name}.png", dpi=100) plt.close() def smooth(x, n=20): l = len(x) y = [] for i in range(l): ii = max(0, i - n) jj = min(i + n, l - 1) v = np.array(x[ii:jj]).astype(np.float64) if i < 3: y.append(x[i]) else: y.append(v.mean()) return y def plot_loss(trajs, name="tmp"): fig = plt.figure() ax = fig.add_subplot(111) colormaps = { "sgd": "tab:blue", "pcgrad": "tab:orange", "mgd": "tab:purple", "cagrad": "tab:red", "sdmgrad": "tab:cyan" } maps = {"sgd": "Adam", "pcgrad": "PCGrad", "mgd": "MGDA", "cagrad": "CAGrad", "sdmgrad": "SDMGrad (Ours)"} for method in ["sgd", "mgd", "pcgrad", "cagrad", "sdmgrad"]: traj = trajs[method][::100] Ys = F.batch_forward(traj) x = np.arange(traj.shape[0]) #y = torch.cummin(Ys.mean(1), 0)[0] y = Ys.mean(1) ax.plot(x, smooth(list(y)), color=colormaps[method], linestyle='-', label=maps[method], linewidth=4.) 
plt.xticks([0, 200, 400, 600, 800, 1000], ["0", "20K", "40K", "60K", "80K", "100K"], fontsize=15) plt.yticks(fontsize=15) ax.grid() plt.legend(fontsize=15) ax.set_aspect(1.0 / ax.get_data_ratio(), adjustable='box') plt.tight_layout() plt.savefig(f"{name}.png", dpi=100) plt.close() # Multi-Objective Optimization Solver def mean_grad(grads): return grads.mean(1) def pcgrad(grads): g1 = grads[:, 0] g2 = grads[:, 1] g11 = g1.dot(g1).item() g12 = g1.dot(g2).item() g22 = g2.dot(g2).item() if g12 < 0: return ((1 - g12 / g11) * g1 + (1 - g12 / g22) * g2) / 2 else: return (g1 + g2) / 2 def mgd(grads): g1 = grads[:, 0] g2 = grads[:, 1] g11 = g1.dot(g1).item() g12 = g1.dot(g2).item() g22 = g2.dot(g2).item() if g12 < min(g11, g22): x = (g22 - g12) / (g11 + g22 - 2 * g12 + 1e-8) elif g11 < g22: x = 1 else: x = 0 g_mgd = x * g1 + (1 - x) * g2 # mgd gradient g_mgd return g_mgd def cagrad(grads, c=0.5): g1 = grads[:, 0] g2 = grads[:, 1] g0 = (g1 + g2) / 2 g11 = g1.dot(g1).item() g12 = g1.dot(g2).item() g22 = g2.dot(g2).item() g0_norm = 0.5 * np.sqrt(g11 + g22 + 2 * g12 + 1e-4) # want to minimize g_w^Tg_0 + c*||g_0||*||g_w|| coef = c * g0_norm def obj(x): # g_w^T g_0: x*0.5*(g11+g22-2g12)+(0.5+x)*(g12-g22)+g22 # g_w^T g_w: x^2*(g11+g22-2g12)+2*x*(g12-g22)+g22 return coef * np.sqrt(x**2*(g11+g22-2*g12)+2*x*(g12-g22)+g22+1e-4) + \ 0.5*x*(g11+g22-2*g12)+(0.5+x)*(g12-g22)+g22 res = minimize_scalar(obj, bounds=(0, 1), method='bounded') x = res.x gw = x * g1 + (1 - x) * g2 gw_norm = np.sqrt(x**2 * g11 + (1 - x)**2 * g22 + 2 * x * (1 - x) * g12 + 1e-4) lmbda = coef / (gw_norm + 1e-4) g = g0 + lmbda * gw return g / (1 + c) def sdmgrad(grads, lmbda): g1 = grads[:, 0] g2 = grads[:, 1] g0 = (g1 + g2) / 2 g11 = g1.dot(g1).item() g12 = g1.dot(g2).item() g22 = g2.dot(g2).item() def obj(x): # g_w^T g_0: x*0.5*(g11+g22-2g12)+(0.5+x)*(g12-g22)+g22 # g_w^T g_w: x^2*(g11+g22-2g12)+2*x*(g12-g22)+g22 return (x**2*(g11+g22-2*g12)+2*x*(g12-g22)+g22+1e-4) + \ 2 * lmbda * (0.5*x*(g11+g22-2*g12)+(0.5+x)*(g12-g22)+g22) + \ lmbda**2 * 0.25 * (g11+g22+2*g12+1e-4) res = minimize_scalar(obj, bounds=(0, 1), method='bounded') x = res.x gw = x * g1 + (1 - x) * g2 g = lmbda * g0 + gw return g / (1 + lmbda) def add_noise(grads, coef=0.2): grads_ = grads + coef * torch.randn_like(grads) return grads_ F = Toy() maps = {"sgd": mean_grad, "cagrad": cagrad, "mgd": mgd, "pcgrad": pcgrad, "sdmgrad": sdmgrad} def run_all(): all_traj = {} # the initial positions inits = [ torch.Tensor([-8.5, 7.5]), torch.Tensor([-8.5, -5.]), torch.Tensor([9., 9.]), ] for i, init in enumerate(inits): for m in tqdm(["sgd", "mgd", "pcgrad", "cagrad", "sdmgrad"]): all_traj[m] = None traj = [] solver = maps[m] x = init.clone() x.requires_grad = True n_iter = 70000 opt = torch.optim.Adam([x], lr=0.002) # scheduler = ExponentialLR(opt, gamma = 0.9999) for it in range(n_iter): traj.append(x.detach().numpy().copy()) # if it % 1000 == 0: # print(f'\niteration {it}, before update x: ', x.detach().numpy().copy()) f, grads = F(x, True) grads = add_noise(grads, coef=0.2) # grads = add_element_noise(grads, coef=1.0, it=it) if m == "cagrad": g = solver(grads, c=0.5) elif m == "sdmgrad": g = solver(grads, lmbda=0.01) else: g = solver(grads) opt.zero_grad() x.grad = g opt.step() # scheduler.step() all_traj[m] = torch.tensor(np.array(traj)) torch.save(all_traj, f"toy{i}.pt") plot_loss(all_traj) plot_results() def plot_results(): plot3d(F) plot_contour(F, 1, name="toy_task_1") plot_contour(F, 2, name="toy_task_2") t1 = torch.load(f"toy0.pt") t2 = torch.load(f"toy1.pt") t3 = 
torch.load(f"toy2.pt") length = t1["sdmgrad"].shape[0] for method in ["sgd", "mgd", "pcgrad", "cagrad", "sdmgrad"]: ranges = list(range(10, length, 1000)) ranges.append(length - 1) for t in tqdm(ranges): plot_contour( F, task=0, # task == 0 meeas plot for both tasks traj=[t1[method][:t], t2[method][:t], t3[method][:t]], plotbar=(method == "sdmgrad"), name=f"./imgs/toy_{method}_{t}") if __name__ == "__main__": run_all()
file_length: 13,100 | avg_line_length: 28.308725 | max_line_length: 110 | extension_type: py
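A small sketch isolating the per-step gradient combination used in run_all above: each solver maps the 2-column gradient matrix (one column per task loss) to a single update direction. The numbers are arbitrary and the import path is an assumption.

import torch
from toy import pcgrad, mgd, cagrad, sdmgrad  # assumed import path for the solvers above

# Columns are dL1/dx and dL2/dx for the 2-D parameter x = (x1, x2).
grads = torch.tensor([[1.0, -0.5],
                      [0.2,  0.8]])

g_pc  = pcgrad(grads)               # project away the conflicting component
g_mgd = mgd(grads)                  # min-norm convex combination (MGDA)
g_ca  = cagrad(grads, c=0.5)        # CAGrad with conflict-avoidance parameter c
g_sdm = sdmgrad(grads, lmbda=0.01)  # SDMGrad with regularisation weight lmbda

# In the training loop the chosen direction replaces the parameter gradient:
#   opt.zero_grad(); x.grad = g_sdm; opt.step()
print(g_pc, g_mgd, g_ca, g_sdm)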
repo: sdmgrad
file: sdmgrad-main/mtrl/mtrl_files/sdmgrad.py
code:
from copy import deepcopy from typing import Iterable, List, Optional, Tuple import numpy as np import time import torch from omegaconf import OmegaConf from mtrl.agent import grad_manipulation as grad_manipulation_agent from mtrl.utils.types import ConfigType, TensorType #from mtrl.agent.mgda import MinNormSolver def euclidean_proj_simplex(v, s=1): """ Compute the Euclidean projection on a positive simplex Solves the optimisation problem (using the algorithm from [1]): min_w 0.5 * || w - v ||_2^2 , s.t. \sum_i w_i = s, w_i >= 0 Parameters ---------- v: (n,) numpy array, n-dimensional vector to project s: int, optional, default: 1, radius of the simplex Returns ------- w: (n,) numpy array, Euclidean projection of v on the simplex Notes ----- The complexity of this algorithm is in O(n log(n)) as it involves sorting v. Better alternatives exist for high-dimensional sparse vectors (cf. [1]) However, this implementation still easily scales to millions of dimensions. References ---------- [1] Efficient Projections onto the .1-Ball for Learning in High Dimensions John Duchi, Shai Shalev-Shwartz, Yoram Singer, and Tushar Chandra. International Conference on Machine Learning (ICML 2008) http://www.cs.berkeley.edu/~jduchi/projects/DuchiSiShCh08.pdf [2] Projection onto the probability simplex: An efficient algorithm with a simple proof, and an application Weiran Wang, Miguel Á. Carreira-Perpiñán. arXiv:1309.1541 https://arxiv.org/pdf/1309.1541.pdf [3] https://gist.github.com/daien/1272551/edd95a6154106f8e28209a1c7964623ef8397246#file-simplex_projection-py """ assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s v = v.astype(np.float64) n, = v.shape # will raise ValueError if v is not 1-D # check if we are already on the simplex if v.sum() == s and np.alltrue(v >= 0): # best projection: itself! return v # get the array of cumulative sums of a sorted (decreasing) copy of v u = np.sort(v)[::-1] cssv = np.cumsum(u) # get the number of > 0 components of the optimal solution rho = np.nonzero(u * np.arange(1, n + 1) > (cssv - s))[0][-1] # compute the Lagrange multiplier associated to the simplex constraint theta = float(cssv[rho] - s) / (rho + 1) # compute the projection by thresholding v using theta w = (v - theta).clip(min=0) return w def _check_param_device(param: TensorType, old_param_device: Optional[int]) -> int: """This helper function is to check if the parameters are located in the same device. Currently, the conversion between model parameters and single vector form is not supported for multiple allocations, e.g. parameters in different GPUs, or mixture of CPU/GPU. The implementation is taken from: https://github.com/pytorch/pytorch/blob/22a34bcf4e5eaa348f0117c414c3dd760ec64b13/torch/nn/utils/convert_parameters.py#L57 Args: param ([TensorType]): a Tensor of a parameter of a model. old_param_device ([int]): the device where the first parameter of a model is allocated. 
Returns: old_param_device (int): report device for the first time """ # Meet the first parameter if old_param_device is None: old_param_device = param.get_device() if param.is_cuda else -1 else: warn = False if param.is_cuda: # Check if in same GPU warn = param.get_device() != old_param_device else: # Check if in CPU warn = old_param_device != -1 if warn: raise TypeError("Found two parameters on different devices, " "this is currently not supported.") return old_param_device def apply_vector_grad_to_parameters(vec: TensorType, parameters: Iterable[TensorType], accumulate: bool = False): """Apply vector gradients to the parameters Args: vec (TensorType): a single vector represents the gradients of a model. parameters (Iterable[TensorType]): an iterator of Tensors that are the parameters of a model. """ # Ensure vec of type Tensor if not isinstance(vec, torch.Tensor): raise TypeError("expected torch.Tensor, but got: {}".format(torch.typename(vec))) # Flag for the device where the parameter is located param_device = None # Pointer for slicing the vector for each parameter pointer = 0 for param in parameters: # Ensure the parameters are located in the same device param_device = _check_param_device(param, param_device) # The length of the parameter num_param = param.numel() # Slice the vector, reshape it, and replace the old grad of the parameter if accumulate: param.grad = (param.grad + vec[pointer:pointer + num_param].view_as(param).data) else: param.grad = vec[pointer:pointer + num_param].view_as(param).data # Increment the pointer pointer += num_param class Agent(grad_manipulation_agent.Agent): def __init__( self, env_obs_shape: List[int], action_shape: List[int], action_range: Tuple[int, int], device: torch.device, agent_cfg: ConfigType, multitask_cfg: ConfigType, cfg_to_load_model: Optional[ConfigType] = None, should_complete_init: bool = True, ): """Regularized gradient algorithm.""" agent_cfg_copy = deepcopy(agent_cfg) del agent_cfg_copy['sdmgrad_lmbda'] del agent_cfg_copy['sdmgrad_method'] OmegaConf.set_struct(agent_cfg_copy, False) agent_cfg_copy.cfg_to_load_model = None agent_cfg_copy.should_complete_init = False agent_cfg_copy.loss_reduction = "none" OmegaConf.set_struct(agent_cfg_copy, True) super().__init__( env_obs_shape=env_obs_shape, action_shape=action_shape, action_range=action_range, multitask_cfg=multitask_cfg, agent_cfg=agent_cfg_copy, device=device, ) self.agent._compute_gradient = self._compute_gradient self._rng = np.random.default_rng() self.sdmgrad_lmbda = agent_cfg['sdmgrad_lmbda'] self.sdmgrad_method = agent_cfg['sdmgrad_method'] fn_maps = { "sdmgrad": self.sdmgrad, } for k in range(2, 50): fn_maps[f"sdmgrad_os{k}"] = self.sdmgrad_os fn_names = ", ".join(fn_maps.keys()) assert self.sdmgrad_method in fn_maps, \ f"[error] unrealized fn {self.sdmgrad_method}, currently we have {fn_names}" self.sdmgrad_fn = fn_maps[self.sdmgrad_method] self.wi_map = {} self.num_param_block = -1 self.conflicts = [] self.last_w = None self.save_target = 500000 if "os" in self.sdmgrad_method: num_tasks = multitask_cfg['num_envs'] self.os_n = int(self.sdmgrad_method[self.sdmgrad_method.find("os") + 2:]) if should_complete_init: self.complete_init(cfg_to_load_model=cfg_to_load_model) def _compute_gradient( self, loss: TensorType, # batch x 1 parameters: List[TensorType], step: int, component_names: List[str], env_metadata: grad_manipulation_agent.EnvMetadata, retain_graph: bool = False, allow_unused: bool = False, ) -> None: #t0 = time.time() task_loss = 
self._convert_loss_into_task_loss(loss=loss, env_metadata=env_metadata) num_tasks = task_loss.shape[0] grad = [] if "os" in self.sdmgrad_method: n = self.os_n while True: idx = np.random.binomial(1, n / num_tasks, num_tasks) sample_idx = np.where(idx == 1)[0] n_sample = sample_idx.shape[0] if n_sample: break losses = [0] * n_sample for j in range(n_sample): losses[j] = task_loss[sample_idx[j]] for loss in losses: grad.append( tuple(_grad.contiguous() for _grad in torch.autograd.grad( loss, parameters, retain_graph=True, allow_unused=allow_unused, ))) else: for index in range(num_tasks): grad.append( tuple(_grad.contiguous() for _grad in torch.autograd.grad( task_loss[index], parameters, retain_graph=(retain_graph or index != num_tasks - 1), allow_unused=allow_unused, ))) grad_vec = torch.cat( list(map(lambda x: torch.nn.utils.parameters_to_vector(x).unsqueeze(0), grad)), dim=0, ) # num_tasks x dim regularized_grad = self.sdmgrad_fn(grad_vec, num_tasks) apply_vector_grad_to_parameters(regularized_grad, parameters) def sdmgrad(self, grad_vec, num_tasks): """ grad_vec: [num_tasks, dim] """ grads = grad_vec GG = torch.mm(grads, grads.t()).cpu() scale = torch.mean(torch.sqrt(torch.diag(GG) + 1e-4)) GG = GG / scale.pow(2) Gg = torch.mean(GG, dim=1) gg = torch.mean(Gg) w = torch.ones(num_tasks) / num_tasks w.requires_grad = True if num_tasks == 50: w_opt = torch.optim.SGD([w], lr=50, momentum=0.5) else: w_opt = torch.optim.SGD([w], lr=25, momentum=0.5) lmbda = self.sdmgrad_lmbda w_best = None obj_best = np.inf for i in range(21): w_opt.zero_grad() obj = torch.dot(w, torch.mv(GG, w)) + 2 * lmbda * torch.dot(w, Gg) + lmbda**2 * gg if obj.item() < obj_best: obj_best = obj.item() w_best = w.clone() if i < 20: obj.backward() w_opt.step() proj = euclidean_proj_simplex(w.data.cpu().numpy()) w.data.copy_(torch.from_numpy(proj).data) g0 = torch.mean(grads, dim=0) gw = torch.mv(grads.t(), w_best.to(grads.device)) g = (gw + lmbda * g0) / (1 + lmbda) return g def sdmgrad_os(self, grad_vec, num_tasks): """ objective sampling grad_vec: [num_tasks, dim] """ grads = grad_vec n = grads.size(0) GG = torch.mm(grads, grads.t()).cpu() scale = (torch.diag(GG) + 1e-4).sqrt().mean() GG = GG / scale.pow(2) Gg = torch.mean(GG, dim=1) gg = torch.mean(Gg) w = torch.ones(n) / n w.requires_grad = True w_opt = torch.optim.SGD([w], lr=50, momentum=0.5) lmbda = self.sdmgrad_lmbda w_best = None obj_best = np.inf for i in range(21): w_opt.zero_grad() obj = torch.dot(w, torch.mv(GG, w)) + 2 * lmbda * torch.dot(w, Gg) + lmbda**2 * gg if obj.item() < obj_best: obj_best = obj.item() w_best = w.clone() if i < 20: obj.backward() w_opt.step() proj = euclidean_proj_simplex(w.data.cpu().numpy()) w.data.copy_(torch.from_numpy(proj).data) g0 = torch.mean(grads, dim=0) gw = torch.mv(grads.t(), w_best.to(grads.device)) g = (gw + lmbda * g0) / (1 + lmbda) return g
file_length: 11,791 | avg_line_length: 35.965517 | max_line_length: 163 | extension_type: py
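A quick check of euclidean_proj_simplex above, the projection applied to the task-weight vector after every SGD step inside sdmgrad(); the input vector is arbitrary, and the function is assumed to be in scope (it is defined in the file above).

import numpy as np

# euclidean_proj_simplex as defined in the file above is assumed to be in scope.
v = np.array([0.7, -0.2, 0.9])
w = euclidean_proj_simplex(v)
print(w, w.sum())  # [0.4 0.  0.6] 1.0: non-negative entries summing to 1

# Inside sdmgrad(), w starts uniform over tasks, takes ~20 SGD steps on
#   w^T GG w + 2 * lmbda * w^T Gg + lmbda^2 * gg,
# and is projected back onto the simplex with this routine after each step.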
repo: sdmgrad
file: sdmgrad-main/nyuv2/model_segnet_single.py
code:
import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import argparse from create_dataset import * from utils import * parser = argparse.ArgumentParser(description='Single-task: One Task') parser.add_argument('--task', default='semantic', type=str, help='choose task: semantic, depth, normal') parser.add_argument('--dataroot', default='nyuv2', type=str, help='dataset root') parser.add_argument('--seed', default=0, type=int, help='the seed') parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2') opt = parser.parse_args() class SegNet(nn.Module): def __init__(self): super(SegNet, self).__init__() # initialise network parameters filter = [64, 128, 256, 512, 512] self.class_nb = 13 # define encoder decoder layers self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])]) self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]])) self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]])) # define convolution layer self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): if i == 0: self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]])) else: self.conv_block_enc.append( nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]), self.conv_layer([filter[i + 1], filter[i + 1]]))) self.conv_block_dec.append( nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]]))) if opt.task == 'semantic': self.pred_task = self.conv_layer([filter[0], self.class_nb], pred=True) if opt.task == 'depth': self.pred_task = self.conv_layer([filter[0], 1], pred=True) if opt.task == 'normal': self.pred_task = self.conv_layer([filter[0], 3], pred=True) # define pooling and unpooling functions self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True) self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def conv_layer(self, channel, pred=False): if not pred: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), ) return conv_block def forward(self, x): g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5)) for i in range(5): g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2)) # define global shared network for i in range(5): if i == 0: g_encoder[i][0] = self.encoder_block[i](x) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) else: g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1]) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = 
self.down_sampling(g_encoder[i][1]) for i in range(5): if i == 0: g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) else: g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) # define task prediction layers if opt.task == 'semantic': pred = F.log_softmax(self.pred_task(g_decoder[-1][-1]), dim=1) if opt.task == 'depth': pred = self.pred_task(g_decoder[-1][-1]) if opt.task == 'normal': pred = self.pred_task(g_decoder[-1][-1]) pred = pred / torch.norm(pred, p=2, dim=1, keepdim=True) return pred # control seed torch.backends.cudnn.enabled = False torch.manual_seed(opt.seed) np.random.seed(opt.seed) random.seed(opt.seed) torch.cuda.manual_seed_all(opt.seed) # define model, optimiser and scheduler device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") SegNet = SegNet().to(device) optimizer = optim.Adam(SegNet.parameters(), lr=1e-4) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5) print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet), count_parameters(SegNet) / 24981069)) print( 'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30') # define dataset dataset_path = opt.dataroot if opt.apply_augmentation: nyuv2_train_set = NYUv2(root=dataset_path, train=True, augmentation=True) print('Applying data augmentation on NYUv2.') else: nyuv2_train_set = NYUv2(root=dataset_path, train=True) print('Standard training strategy without data augmentation.') nyuv2_test_set = NYUv2(root=dataset_path, train=False) batch_size = 2 nyuv2_train_loader = torch.utils.data.DataLoader(dataset=nyuv2_train_set, batch_size=batch_size, shuffle=True) nyuv2_test_loader = torch.utils.data.DataLoader(dataset=nyuv2_test_set, batch_size=batch_size, shuffle=False) # Train and evaluate single-task network single_task_trainer(nyuv2_train_loader, nyuv2_test_loader, SegNet, device, optimizer, scheduler, opt, 200)
file_length: 6,820 | avg_line_length: 43.292208 | max_line_length: 120 | extension_type: py
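The encoder/decoder above round-trips spatial resolution through MaxPool2d(return_indices=True) and MaxUnpool2d; a minimal standalone illustration of that pairing, with arbitrary tensor shapes:

import torch
import torch.nn as nn

down = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
up = nn.MaxUnpool2d(kernel_size=2, stride=2)

x = torch.randn(1, 64, 32, 32)
pooled, indices = down(x)       # (1, 64, 16, 16) plus the positions of the maxima
restored = up(pooled, indices)  # (1, 64, 32, 32): zeros except at those positions
print(pooled.shape, restored.shape)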
repo: sdmgrad
file: sdmgrad-main/nyuv2/evaluate.py
code:
import matplotlib import matplotlib.pyplot as plt import seaborn as sns import numpy as np import torch import itertools methods = [ "sdmgrad-1e-1", "sdmgrad-2e-1", "sdmgrad-3e-1", "sdmgrad-4e-1", "sdmgrad-5e-1", "sdmgrad-6e-1", "sdmgrad-7e-1", "sdmgrad-8e-1", "sdmgrad-9e-1", "sdmgrad-1e0" ] colors = ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "tab:green", "tab:cyan", "tab:blue", "tab:red"] stats = [ "semantic loss", "mean iou", "pix acc", "depth loss", "abs err", "rel err", "normal loss", "mean", "median", "<11.25", "<22.5", "<30" ] delta_stats = ["mean iou", "pix acc", "abs err", "rel err", "mean", "median", "<11.25", "<22.5", "<30"] stats_idx_map = [4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 16, 17] time_idx = 34 # change random seeds used in the experiments here seeds = [0, 1, 2] logs = {} min_epoch = 100000 for m in methods: logs[m] = {"train": [None for _ in range(3)], "test": [None for _ in range(3)]} for seed in seeds: logs[m]["train"][seed] = {} logs[m]["test"][seed] = {} for stat in stats: for seed in seeds: logs[m]["train"][seed][stat] = [] logs[m]["test"][seed][stat] = [] for seed in seeds: logs[m]["train"][seed]["time"] = [] for seed in seeds: fname = f"logs/{m}-sd{seed}.log" with open(fname, "r") as f: lines = f.readlines() for line in lines: if line.startswith("Epoch"): ws = line.split(" ") for i, stat in enumerate(stats): logs[m]["train"][seed][stat].append(float(ws[stats_idx_map[i]])) logs[m]["test"][seed][stat].append(float(ws[stats_idx_map[i] + 15])) logs[m]["train"][seed]["time"].append(float(ws[time_idx])) min_epoch = min(min(min_epoch, len(logs[m]["train"][seed]["semantic loss"])), len(logs[m]["test"][seed]["semantic loss"])) test_stats = {} train_stats = {} learning_time = {} print(" " * 25 + " | ".join([f"{s:5s}" for s in stats])) for mi, mode in enumerate(["train", "test"]): if mi == 1: print(mode) for mmi, m in enumerate(methods): if m not in test_stats: test_stats[m] = {} train_stats[m] = {} string = f"{m:30s} " for stat in stats: x = [] for seed in seeds: x.append(np.array(logs[m][mode][seed][stat][min_epoch - 10:min_epoch]).mean()) x = np.array(x) if mode == "test": test_stats[m][stat] = x.copy() else: train_stats[m][stat] = x.copy() mu = x.mean() std = x.std() / np.sqrt(3) string += f" | {mu:5.4f}" if mode == "test": print(string) for m in methods: learning_time[m] = np.array([np.array(logs[m]["train"][sd]["time"]).mean() for sd in seeds]) for method in methods: average_loss = np.mean([ train_stats[method]["semantic loss"].mean(), train_stats[method]["depth loss"].mean(), train_stats[method]["normal loss"].mean() ]) print(f"{method} average training loss {average_loss}") base = np.array([0.3830, 0.6376, 0.6754, 0.2780, 25.01, 19.21, 0.3014, 0.5720, 0.6915]) sign = np.array([1, 1, 0, 0, 0, 0, 1, 1, 1]) kk = np.ones(9) * -1 def delta_fn(a): return (kk**sign * (a - base) / base).mean() * 100. # *100 for percentage deltas = {} for method in methods: tmp = np.zeros(9) for i, stat in enumerate(delta_stats): tmp[i] = test_stats[method][stat].mean() deltas[method] = delta_fn(tmp) print(f"{method:30s} delta: {deltas[method]:4.3f}")
file_length: 3,777 | avg_line_length: 30.747899 | max_line_length: 117 | extension_type: py
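A worked check of the delta_fn relative metric above, restated so it runs standalone: sign marks higher-is-better statistics, and (-1)**sign flips them so that improvements over the single-task baseline push the average delta down (lower is better). The candidate scores are made up.

import numpy as np

base = np.array([0.3830, 0.6376, 0.6754, 0.2780, 25.01, 19.21, 0.3014, 0.5720, 0.6915])
sign = np.array([1, 1, 0, 0, 0, 0, 1, 1, 1])  # 1 = higher is better, 0 = lower is better
kk = np.ones(9) * -1

def delta_fn(a):
    return (kk**sign * (a - base) / base).mean() * 100.

# A hypothetical method that is 10% better than the baseline on every statistic
# (higher where higher is better, lower where lower is better):
better = base * np.where(sign == 1, 1.10, 0.90)
print(delta_fn(better))  # -10.0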
repo: sdmgrad
file: sdmgrad-main/nyuv2/model_segnet_stan.py
code:
import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import argparse from create_dataset import * from utils import * parser = argparse.ArgumentParser(description='Single-task: Attention Network') parser.add_argument('--task', default='semantic', type=str, help='choose task: semantic, depth, normal') parser.add_argument('--dataroot', default='nyuv2', type=str, help='dataset root') parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2') opt = parser.parse_args() class SegNet(nn.Module): def __init__(self): super(SegNet, self).__init__() # initialise network parameters filter = [64, 128, 256, 512, 512] self.class_nb = 13 # define encoder decoder layers self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])]) self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]])) self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]])) # define convolution layer self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): if i == 0: self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]])) else: self.conv_block_enc.append( nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]), self.conv_layer([filter[i + 1], filter[i + 1]]))) self.conv_block_dec.append( nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]]))) self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])]) self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])]) self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])]) self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for j in range(1): for i in range(4): self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]])) self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]])) for i in range(4): if i < 3: self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]])) self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]])) else: self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]])) if opt.task == 'semantic': self.pred_task = self.conv_layer([filter[0], self.class_nb], pred=True) if opt.task == 'depth': self.pred_task = self.conv_layer([filter[0], 1], pred=True) if opt.task == 'normal': self.pred_task = self.conv_layer([filter[0], 3], pred=True) # define pooling and unpooling functions self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True) self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def conv_layer(self, channel, pred=False): if not pred: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, 
padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), ) return conv_block def att_layer(self, channel): att_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), nn.BatchNorm2d(channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0), nn.BatchNorm2d(channel[2]), nn.Sigmoid(), ) return att_block def forward(self, x): g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5)) for i in range(5): g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2)) # define attention list for two tasks atten_encoder, atten_decoder = ([0] * 3 for _ in range(2)) for i in range(3): atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2)) for i in range(3): for j in range(5): atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2)) # define global shared network for i in range(5): if i == 0: g_encoder[i][0] = self.encoder_block[i](x) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) else: g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1]) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) for i in range(5): if i == 0: g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) else: g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) # define task dependent attention module for i in range(1): for j in range(5): if j == 0: atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0]) atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1] atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1]) atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2) else: atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat( (g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1)) atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1] atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1]) atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2) for j in range(5): if j == 0: atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1], scale_factor=2, mode='bilinear', align_corners=True) atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0]) atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat( (g_upsampl[j], atten_decoder[i][j][0]), dim=1)) atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1] else: atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2], scale_factor=2, mode='bilinear', align_corners=True) atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0]) atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat( (g_upsampl[j], atten_decoder[i][j][0]), dim=1)) atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1] # define task prediction layers if opt.task == 'semantic': pred = 
F.log_softmax(self.pred_task(atten_decoder[0][-1][-1]), dim=1) if opt.task == 'depth': pred = self.pred_task(atten_decoder[0][-1][-1]) if opt.task == 'normal': pred = self.pred_task(atten_decoder[0][-1][-1]) pred = pred / torch.norm(pred, p=2, dim=1, keepdim=True) return pred # define model, optimiser and scheduler device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") SegNet_STAN = SegNet().to(device) optimizer = optim.Adam(SegNet_STAN.parameters(), lr=1e-4) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5) print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_STAN), count_parameters(SegNet_STAN) / 24981069)) print( 'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30') # define dataset dataset_path = opt.dataroot if opt.apply_augmentation: nyuv2_train_set = NYUv2(root=dataset_path, train=True, augmentation=True) print('Applying data augmentation on NYUv2.') else: nyuv2_train_set = NYUv2(root=dataset_path, train=True) print('Standard training strategy without data augmentation.') nyuv2_test_set = NYUv2(root=dataset_path, train=False) batch_size = 2 nyuv2_train_loader = torch.utils.data.DataLoader(dataset=nyuv2_train_set, batch_size=batch_size, shuffle=True) nyuv2_test_loader = torch.utils.data.DataLoader(dataset=nyuv2_test_set, batch_size=batch_size, shuffle=False) # Train and evaluate single-task network single_task_trainer(nyuv2_train_loader, nyuv2_test_loader, SegNet_STAN, device, optimizer, scheduler, opt, 200)
file_length: 11,017 | avg_line_length: 49.310502 | max_line_length: 119 | extension_type: py
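The per-task attention modules above reduce to a 1x1-conv sigmoid gate that rescales shared encoder features element-wise; a minimal standalone sketch of that gating, with arbitrary channel sizes:

import torch
import torch.nn as nn

channels = 64
att = nn.Sequential(
    nn.Conv2d(channels, channels, kernel_size=1, padding=0),
    nn.BatchNorm2d(channels),
    nn.ReLU(inplace=True),
    nn.Conv2d(channels, channels, kernel_size=1, padding=0),
    nn.BatchNorm2d(channels),
    nn.Sigmoid(),
)

shared = torch.randn(2, channels, 32, 32)  # shared SegNet encoder features
mask = att(shared)                         # gate values in (0, 1)
task_features = mask * shared              # task-specific re-weighting
print(task_features.shape)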
repo: sdmgrad
file: sdmgrad-main/nyuv2/utils.py
code:
import numpy as np import time import torch import torch.nn.functional as F from copy import deepcopy from min_norm_solvers import MinNormSolver from scipy.optimize import minimize, Bounds, minimize_scalar def euclidean_proj_simplex(v, s=1): """ Compute the Euclidean projection on a positive simplex Solves the optimisation problem (using the algorithm from [1]): min_w 0.5 * || w - v ||_2^2 , s.t. \sum_i w_i = s, w_i >= 0 Parameters ---------- v: (n,) numpy array, n-dimensional vector to project s: int, optional, default: 1, radius of the simplex Returns ------- w: (n,) numpy array, Euclidean projection of v on the simplex Notes ----- The complexity of this algorithm is in O(n log(n)) as it involves sorting v. Better alternatives exist for high-dimensional sparse vectors (cf. [1]) However, this implementation still easily scales to millions of dimensions. References ---------- [1] Efficient Projections onto the .1-Ball for Learning in High Dimensions John Duchi, Shai Shalev-Shwartz, Yoram Singer, and Tushar Chandra. International Conference on Machine Learning (ICML 2008) http://www.cs.berkeley.edu/~jduchi/projects/DuchiSiShCh08.pdf [2] Projection onto the probability simplex: An efficient algorithm with a simple proof, and an application Weiran Wang, Miguel Á. Carreira-Perpiñán. arXiv:1309.1541 https://arxiv.org/pdf/1309.1541.pdf [3] https://gist.github.com/daien/1272551/edd95a6154106f8e28209a1c7964623ef8397246#file-simplex_projection-py """ assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s v = v.astype(np.float64) n, = v.shape # will raise ValueError if v is not 1-D # check if we are already on the simplex if v.sum() == s and np.alltrue(v >= 0): # best projection: itself! return v # get the array of cumulative sums of a sorted (decreasing) copy of v u = np.sort(v)[::-1] cssv = np.cumsum(u) # get the number of > 0 components of the optimal solution rho = np.nonzero(u * np.arange(1, n + 1) > (cssv - s))[0][-1] # compute the Lagrange multiplier associated to the simplex constraint theta = float(cssv[rho] - s) / (rho + 1) # compute the projection by thresholding v using theta w = (v - theta).clip(min=0) return w """ Define task metrics, loss functions and model trainer here. """ def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def model_fit(x_pred, x_output, task_type): device = x_pred.device # binary mark to mask out undefined pixel space binary_mask = (torch.sum(x_output, dim=1) != 0).float().unsqueeze(1).to(device) if task_type == 'semantic': # semantic loss: depth-wise cross entropy loss = F.nll_loss(x_pred, x_output, ignore_index=-1) if task_type == 'depth': # depth loss: l1 norm loss = torch.sum(torch.abs(x_pred - x_output) * binary_mask) / torch.nonzero(binary_mask, as_tuple=False).size(0) if task_type == 'normal': # normal loss: dot product loss = 1 - torch.sum((x_pred * x_output) * binary_mask) / torch.nonzero(binary_mask, as_tuple=False).size(0) return loss # Legacy: compute mIoU and Acc. for each image and average across all images. 
# def compute_miou(x_pred, x_output): # _, x_pred_label = torch.max(x_pred, dim=1) # x_output_label = x_output # batch_size = x_pred.size(0) # class_nb = x_pred.size(1) # device = x_pred.device # for i in range(batch_size): # true_class = 0 # first_switch = True # invalid_mask = (x_output[i] >= 0).float() # for j in range(class_nb): # pred_mask = torch.eq(x_pred_label[i], j * torch.ones(x_pred_label[i].shape).long().to(device)) # true_mask = torch.eq(x_output_label[i], j * torch.ones(x_output_label[i].shape).long().to(device)) # mask_comb = pred_mask.float() + true_mask.float() # union = torch.sum((mask_comb > 0).float() * invalid_mask) # remove non-defined pixel predictions # intsec = torch.sum((mask_comb > 1).float()) # if union == 0: # continue # if first_switch: # class_prob = intsec / union # first_switch = False # else: # class_prob = intsec / union + class_prob # true_class += 1 # if i == 0: # batch_avg = class_prob / true_class # else: # batch_avg = class_prob / true_class + batch_avg # return batch_avg / batch_size # def compute_iou(x_pred, x_output): # _, x_pred_label = torch.max(x_pred, dim=1) # x_output_label = x_output # batch_size = x_pred.size(0) # for i in range(batch_size): # if i == 0: # pixel_acc = torch.div( # torch.sum(torch.eq(x_pred_label[i], x_output_label[i]).float()), # torch.sum((x_output_label[i] >= 0).float())) # else: # pixel_acc = pixel_acc + torch.div( # torch.sum(torch.eq(x_pred_label[i], x_output_label[i]).float()), # torch.sum((x_output_label[i] >= 0).float())) # return pixel_acc / batch_size # New mIoU and Acc. formula: accumulate every pixel and average across all pixels in all images class ConfMatrix(object): def __init__(self, num_classes): self.num_classes = num_classes self.mat = None def update(self, pred, target): n = self.num_classes if self.mat is None: self.mat = torch.zeros((n, n), dtype=torch.int64, device=pred.device) with torch.no_grad(): k = (target >= 0) & (target < n) inds = n * target[k].to(torch.int64) + pred[k] self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n) def get_metrics(self): h = self.mat.float() acc = torch.diag(h).sum() / h.sum() iu = torch.diag(h) / (h.sum(1) + h.sum(0) - torch.diag(h)) return torch.mean(iu).item(), acc.item() def depth_error(x_pred, x_output): device = x_pred.device binary_mask = (torch.sum(x_output, dim=1) != 0).unsqueeze(1).to(device) x_pred_true = x_pred.masked_select(binary_mask) x_output_true = x_output.masked_select(binary_mask) abs_err = torch.abs(x_pred_true - x_output_true) rel_err = torch.abs(x_pred_true - x_output_true) / x_output_true return (torch.sum(abs_err) / torch.nonzero(binary_mask, as_tuple=False).size(0)).item(), \ (torch.sum(rel_err) / torch.nonzero(binary_mask, as_tuple=False).size(0)).item() def normal_error(x_pred, x_output): binary_mask = (torch.sum(x_output, dim=1) != 0) error = torch.acos(torch.clamp(torch.sum(x_pred * x_output, 1).masked_select(binary_mask), -1, 1)).detach().cpu().numpy() error = np.degrees(error) return np.mean(error), np.median(error), np.mean(error < 11.25), np.mean(error < 22.5), np.mean(error < 30) """ =========== Universal Multi-task Trainer =========== """ def multi_task_trainer(train_loader, test_loader, multi_task_model, device, optimizer, scheduler, opt, total_epoch=200): start_time = time.time() train_batch = len(train_loader) test_batch = len(test_loader) T = opt.temp avg_cost = np.zeros([total_epoch, 24], dtype=np.float32) lambda_weight = np.ones([3, total_epoch]) for index in range(total_epoch): epoch_start_time = time.time() cost = 
np.zeros(24, dtype=np.float32) # apply Dynamic Weight Average if opt.weight == 'dwa': if index == 0 or index == 1: lambda_weight[:, index] = 1.0 else: w_1 = avg_cost[index - 1, 0] / avg_cost[index - 2, 0] w_2 = avg_cost[index - 1, 3] / avg_cost[index - 2, 3] w_3 = avg_cost[index - 1, 6] / avg_cost[index - 2, 6] lambda_weight[0, index] = 3 * np.exp(w_1 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T)) lambda_weight[1, index] = 3 * np.exp(w_2 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T)) lambda_weight[2, index] = 3 * np.exp(w_3 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T)) # iteration for all batches multi_task_model.train() train_dataset = iter(train_loader) conf_mat = ConfMatrix(multi_task_model.class_nb) for k in range(train_batch): train_data, train_label, train_depth, train_normal = train_dataset.next() train_data, train_label = train_data.to(device), train_label.long().to(device) train_depth, train_normal = train_depth.to(device), train_normal.to(device) train_pred, logsigma = multi_task_model(train_data) optimizer.zero_grad() train_loss = [ model_fit(train_pred[0], train_label, 'semantic'), model_fit(train_pred[1], train_depth, 'depth'), model_fit(train_pred[2], train_normal, 'normal') ] if opt.weight == 'equal' or opt.weight == 'dwa': loss = sum([lambda_weight[i, index] * train_loss[i] for i in range(3)]) #loss = sum([w[i] * train_loss[i] for i in range(3)]) else: loss = sum(1 / (2 * torch.exp(logsigma[i])) * train_loss[i] + logsigma[i] / 2 for i in range(3)) loss.backward() optimizer.step() # accumulate label prediction for every pixel in training images conf_mat.update(train_pred[0].argmax(1).flatten(), train_label.flatten()) cost[0] = train_loss[0].item() cost[3] = train_loss[1].item() cost[4], cost[5] = depth_error(train_pred[1], train_depth) cost[6] = train_loss[2].item() cost[7], cost[8], cost[9], cost[10], cost[11] = normal_error(train_pred[2], train_normal) avg_cost[index, :12] += cost[:12] / train_batch # compute mIoU and acc avg_cost[index, 1:3] = conf_mat.get_metrics() # evaluating test data multi_task_model.eval() conf_mat = ConfMatrix(multi_task_model.class_nb) with torch.no_grad(): # operations inside don't track history test_dataset = iter(test_loader) for k in range(test_batch): test_data, test_label, test_depth, test_normal = test_dataset.next() test_data, test_label = test_data.to(device), test_label.long().to(device) test_depth, test_normal = test_depth.to(device), test_normal.to(device) test_pred, _ = multi_task_model(test_data) test_loss = [ model_fit(test_pred[0], test_label, 'semantic'), model_fit(test_pred[1], test_depth, 'depth'), model_fit(test_pred[2], test_normal, 'normal') ] conf_mat.update(test_pred[0].argmax(1).flatten(), test_label.flatten()) cost[12] = test_loss[0].item() cost[15] = test_loss[1].item() cost[16], cost[17] = depth_error(test_pred[1], test_depth) cost[18] = test_loss[2].item() cost[19], cost[20], cost[21], cost[22], cost[23] = normal_error(test_pred[2], test_normal) avg_cost[index, 12:] += cost[12:] / test_batch # compute mIoU and acc avg_cost[index, 13:15] = conf_mat.get_metrics() scheduler.step() epoch_end_time = time.time() print( 'Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} ||' 'TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} | {:.4f}'. 
format(index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 3], avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8], avg_cost[index, 9], avg_cost[index, 10], avg_cost[index, 11], avg_cost[index, 12], avg_cost[index, 13], avg_cost[index, 14], avg_cost[index, 15], avg_cost[index, 16], avg_cost[index, 17], avg_cost[index, 18], avg_cost[index, 19], avg_cost[index, 20], avg_cost[index, 21], avg_cost[index, 22], avg_cost[index, 23], epoch_end_time - epoch_start_time)) end_time = time.time() print("Training time: ", end_time - start_time) """ =========== Universal Single-task Trainer =========== """ def single_task_trainer(train_loader, test_loader, single_task_model, device, optimizer, scheduler, opt, total_epoch=200): train_batch = len(train_loader) test_batch = len(test_loader) avg_cost = np.zeros([total_epoch, 24], dtype=np.float32) for index in range(total_epoch): cost = np.zeros(24, dtype=np.float32) # iteration for all batches single_task_model.train() train_dataset = iter(train_loader) conf_mat = ConfMatrix(single_task_model.class_nb) for k in range(train_batch): train_data, train_label, train_depth, train_normal = train_dataset.next() train_data, train_label = train_data.to(device), train_label.long().to(device) train_depth, train_normal = train_depth.to(device), train_normal.to(device) train_pred = single_task_model(train_data) optimizer.zero_grad() if opt.task == 'semantic': train_loss = model_fit(train_pred, train_label, opt.task) train_loss.backward() optimizer.step() conf_mat.update(train_pred.argmax(1).flatten(), train_label.flatten()) cost[0] = train_loss.item() if opt.task == 'depth': train_loss = model_fit(train_pred, train_depth, opt.task) train_loss.backward() optimizer.step() cost[3] = train_loss.item() cost[4], cost[5] = depth_error(train_pred, train_depth) if opt.task == 'normal': train_loss = model_fit(train_pred, train_normal, opt.task) train_loss.backward() optimizer.step() cost[6] = train_loss.item() cost[7], cost[8], cost[9], cost[10], cost[11] = normal_error(train_pred, train_normal) avg_cost[index, :12] += cost[:12] / train_batch if opt.task == 'semantic': avg_cost[index, 1:3] = conf_mat.get_metrics() # evaluating test data single_task_model.eval() conf_mat = ConfMatrix(single_task_model.class_nb) with torch.no_grad(): # operations inside don't track history test_dataset = iter(test_loader) for k in range(test_batch): test_data, test_label, test_depth, test_normal = test_dataset.next() test_data, test_label = test_data.to(device), test_label.long().to(device) test_depth, test_normal = test_depth.to(device), test_normal.to(device) test_pred = single_task_model(test_data) if opt.task == 'semantic': test_loss = model_fit(test_pred, test_label, opt.task) conf_mat.update(test_pred.argmax(1).flatten(), test_label.flatten()) cost[12] = test_loss.item() if opt.task == 'depth': test_loss = model_fit(test_pred, test_depth, opt.task) cost[15] = test_loss.item() cost[16], cost[17] = depth_error(test_pred, test_depth) if opt.task == 'normal': test_loss = model_fit(test_pred, test_normal, opt.task) cost[18] = test_loss.item() cost[19], cost[20], cost[21], cost[22], cost[23] = normal_error(test_pred, test_normal) avg_cost[index, 12:] += cost[12:] / test_batch if opt.task == 'semantic': avg_cost[index, 13:15] = conf_mat.get_metrics() scheduler.step() if opt.task == 'semantic': print('Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} TEST: {:.4f} {:.4f} {:.4f}'.format( index, avg_cost[index, 0], avg_cost[index, 
1], avg_cost[index, 2], avg_cost[index, 12], avg_cost[index, 13], avg_cost[index, 14])) if opt.task == 'depth': print('Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} TEST: {:.4f} {:.4f} {:.4f}'.format( index, avg_cost[index, 3], avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 15], avg_cost[index, 16], avg_cost[index, 17])) if opt.task == 'normal': print( 'Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} TEST: {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}' .format(index, avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8], avg_cost[index, 9], avg_cost[index, 10], avg_cost[index, 11], avg_cost[index, 18], avg_cost[index, 19], avg_cost[index, 20], avg_cost[index, 21], avg_cost[index, 22], avg_cost[index, 23])) ''' ===== multi task MGD trainer ==== ''' def multi_task_mgd_trainer(train_loader, test_loader, multi_task_model, device, optimizer, scheduler, opt, total_epoch=200, method='sumloss', alpha=0.5, seed=0): start_time = time.time() niter = opt.niter def graddrop(grads): P = 0.5 * (1. + grads.sum(1) / (grads.abs().sum(1) + 1e-8)) U = torch.rand_like(grads[:, 0]) M = P.gt(U).view(-1, 1) * grads.gt(0) + P.lt(U).view(-1, 1) * grads.lt(0) g = (grads * M.float()).mean(1) return g def mgd(grads): grads_cpu = grads.t().cpu() sol, min_norm = MinNormSolver.find_min_norm_element([grads_cpu[t] for t in range(grads.shape[-1])]) w = torch.FloatTensor(sol).to(grads.device) g = grads.mm(w.view(-1, 1)).view(-1) return g def pcgrad(grads, rng): grad_vec = grads.t() num_tasks = 3 shuffled_task_indices = np.zeros((num_tasks, num_tasks - 1), dtype=int) for i in range(num_tasks): task_indices = np.arange(num_tasks) task_indices[i] = task_indices[-1] shuffled_task_indices[i] = task_indices[:-1] rng.shuffle(shuffled_task_indices[i]) shuffled_task_indices = shuffled_task_indices.T normalized_grad_vec = grad_vec / (grad_vec.norm(dim=1, keepdim=True) + 1e-8) # num_tasks x dim modified_grad_vec = deepcopy(grad_vec) for task_indices in shuffled_task_indices: normalized_shuffled_grad = normalized_grad_vec[task_indices] # num_tasks x dim dot = (modified_grad_vec * normalized_shuffled_grad).sum(dim=1, keepdim=True) # num_tasks x dim modified_grad_vec -= torch.clamp_max(dot, 0) * normalized_shuffled_grad g = modified_grad_vec.mean(dim=0) return g def cagrad(grads, alpha=0.5, rescale=1): GG = grads.t().mm(grads).cpu() # [num_tasks, num_tasks] g0_norm = (GG.mean() + 1e-8).sqrt() # norm of the average gradient x_start = np.ones(3) / 3 bnds = tuple((0, 1) for x in x_start) cons = ({'type': 'eq', 'fun': lambda x: 1 - sum(x)}) A = GG.numpy() b = x_start.copy() c = (alpha * g0_norm + 1e-8).item() def objfn(x): return (x.reshape(1, 3).dot(A).dot(b.reshape(3, 1)) + c * np.sqrt(x.reshape(1, 3).dot(A).dot(x.reshape(3, 1)) + 1e-8)).sum() res = minimize(objfn, x_start, bounds=bnds, constraints=cons) w_cpu = res.x ww = torch.Tensor(w_cpu).to(grads.device) gw = (grads * ww.view(1, -1)).sum(1) gw_norm = gw.norm() lmbda = c / (gw_norm + 1e-8) g = grads.mean(1) + lmbda * gw if rescale == 0: return g elif rescale == 1: return g / (1 + alpha**2) else: return g / (1 + alpha) def sdmgrad(w, grads, alpha, niter=20): GG = torch.mm(grads.t(), grads) scale = torch.mean(torch.sqrt(torch.diag(GG) + 1e-4)) GG = GG / scale.pow(2) Gg = torch.mean(GG, dim=1) gg = torch.mean(Gg) w.requires_grad = True optimizer = torch.optim.SGD([w], lr=10, momentum=0.5) for i in range(niter): optimizer.zero_grad() obj = torch.dot(w, torch.mv(GG, w)) + 2 * alpha * torch.dot(w, Gg) + alpha**2 * gg obj.backward() optimizer.step() proj = 
euclidean_proj_simplex(w.data.cpu().numpy()) w.data.copy_(torch.from_numpy(proj).data) w.requires_grad = False g0 = torch.mean(grads, dim=1) gw = torch.mv(grads, w) g = (gw + alpha * g0) / (1 + alpha) return g def grad2vec(m, grads, grad_dims, task): # store the gradients grads[:, task].fill_(0.0) cnt = 0 for mm in m.shared_modules(): for p in mm.parameters(): grad = p.grad if grad is not None: grad_cur = grad.data.detach().clone() beg = 0 if cnt == 0 else sum(grad_dims[:cnt]) en = sum(grad_dims[:cnt + 1]) grads[beg:en, task].copy_(grad_cur.data.view(-1)) cnt += 1 def overwrite_grad(m, newgrad, grad_dims): newgrad = newgrad * 3 # to match the sum loss cnt = 0 for mm in m.shared_modules(): for param in mm.parameters(): beg = 0 if cnt == 0 else sum(grad_dims[:cnt]) en = sum(grad_dims[:cnt + 1]) this_grad = newgrad[beg:en].contiguous().view(param.data.size()) param.grad = this_grad.data.clone() cnt += 1 rng = np.random.default_rng() grad_dims = [] for mm in multi_task_model.shared_modules(): for param in mm.parameters(): grad_dims.append(param.data.numel()) grads = torch.Tensor(sum(grad_dims), 3).cuda() w = 1 / 3 * torch.ones(3).cuda() train_batch = len(train_loader) test_batch = len(test_loader) T = opt.temp avg_cost = np.zeros([total_epoch, 24], dtype=np.float32) lambda_weight = np.ones([3, total_epoch]) neg_trace = [] obj_trace = [] for index in range(total_epoch): epoch_start_time = time.time() cost = np.zeros(24, dtype=np.float32) # apply Dynamic Weight Average if opt.weight == 'dwa': if index == 0 or index == 1: lambda_weight[:, index] = 1.0 else: w_1 = avg_cost[index - 1, 0] / avg_cost[index - 2, 0] w_2 = avg_cost[index - 1, 3] / avg_cost[index - 2, 3] w_3 = avg_cost[index - 1, 6] / avg_cost[index - 2, 6] lambda_weight[0, index] = 3 * np.exp(w_1 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T)) lambda_weight[1, index] = 3 * np.exp(w_2 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T)) lambda_weight[2, index] = 3 * np.exp(w_3 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T)) # iteration for all batches multi_task_model.train() train_dataset = iter(train_loader) conf_mat = ConfMatrix(multi_task_model.class_nb) for k in range(train_batch): train_data, train_label, train_depth, train_normal = train_dataset.next() train_data, train_label = train_data.to(device), train_label.long().to(device) train_depth, train_normal = train_depth.to(device), train_normal.to(device) train_pred, logsigma = multi_task_model(train_data) train_loss = [ model_fit(train_pred[0], train_label, 'semantic'), model_fit(train_pred[1], train_depth, 'depth'), model_fit(train_pred[2], train_normal, 'normal') ] train_loss_tmp = [0, 0, 0] if opt.weight == 'equal' or opt.weight == 'dwa': for i in range(3): train_loss_tmp[i] = train_loss[i] * lambda_weight[i, index] else: for i in range(3): train_loss_tmp[i] = 1 / (2 * torch.exp(logsigma[i])) * train_loss[i] + logsigma[i] / 2 optimizer.zero_grad() if method == "graddrop": for i in range(3): if i < 3: train_loss_tmp[i].backward(retain_graph=True) else: train_loss_tmp[i].backward() grad2vec(multi_task_model, grads, grad_dims, i) multi_task_model.zero_grad_shared_modules() g = graddrop(grads) overwrite_grad(multi_task_model, g, grad_dims) optimizer.step() elif method == "mgd": for i in range(3): if i < 3: train_loss_tmp[i].backward(retain_graph=True) else: train_loss_tmp[i].backward() grad2vec(multi_task_model, grads, grad_dims, i) multi_task_model.zero_grad_shared_modules() g = mgd(grads) overwrite_grad(multi_task_model, g, grad_dims) 
optimizer.step() elif method == "pcgrad": for i in range(3): if i < 3: train_loss_tmp[i].backward(retain_graph=True) else: train_loss_tmp[i].backward() grad2vec(multi_task_model, grads, grad_dims, i) multi_task_model.zero_grad_shared_modules() g = pcgrad(grads, rng) overwrite_grad(multi_task_model, g, grad_dims) optimizer.step() elif method == "cagrad": for i in range(3): if i < 3: train_loss_tmp[i].backward(retain_graph=True) else: train_loss_tmp[i].backward() grad2vec(multi_task_model, grads, grad_dims, i) multi_task_model.zero_grad_shared_modules() g = cagrad(grads, alpha, rescale=1) overwrite_grad(multi_task_model, g, grad_dims) optimizer.step() elif method == "sdmgrad": for i in range(3): if i < 3: train_loss_tmp[i].backward(retain_graph=True) else: train_loss_tmp[i].backward() grad2vec(multi_task_model, grads, grad_dims, i) multi_task_model.zero_grad_shared_modules() g = sdmgrad(w, grads, alpha, niter=niter) overwrite_grad(multi_task_model, g, grad_dims) optimizer.step() # accumulate label prediction for every pixel in training images conf_mat.update(train_pred[0].argmax(1).flatten(), train_label.flatten()) cost[0] = train_loss[0].item() cost[3] = train_loss[1].item() cost[4], cost[5] = depth_error(train_pred[1], train_depth) cost[6] = train_loss[2].item() cost[7], cost[8], cost[9], cost[10], cost[11] = normal_error(train_pred[2], train_normal) avg_cost[index, :12] += cost[:12] / train_batch # compute mIoU and acc avg_cost[index, 1:3] = conf_mat.get_metrics() # evaluating test data multi_task_model.eval() conf_mat = ConfMatrix(multi_task_model.class_nb) with torch.no_grad(): # operations inside don't track history test_dataset = iter(test_loader) for k in range(test_batch): test_data, test_label, test_depth, test_normal = test_dataset.next() test_data, test_label = test_data.to(device), test_label.long().to(device) test_depth, test_normal = test_depth.to(device), test_normal.to(device) test_pred, _ = multi_task_model(test_data) test_loss = [ model_fit(test_pred[0], test_label, 'semantic'), model_fit(test_pred[1], test_depth, 'depth'), model_fit(test_pred[2], test_normal, 'normal') ] conf_mat.update(test_pred[0].argmax(1).flatten(), test_label.flatten()) cost[12] = test_loss[0].item() cost[15] = test_loss[1].item() cost[16], cost[17] = depth_error(test_pred[1], test_depth) cost[18] = test_loss[2].item() cost[19], cost[20], cost[21], cost[22], cost[23] = normal_error(test_pred[2], test_normal) avg_cost[index, 12:] += cost[12:] / test_batch # compute mIoU and acc avg_cost[index, 13:15] = conf_mat.get_metrics() scheduler.step() if method == "mean": torch.save(torch.Tensor(neg_trace), "trace.pt") if "debug" in method: torch.save(torch.Tensor(obj_trace), f"{method}_obj.pt") epoch_end_time = time.time() print( 'Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} ||' 'TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} | {:.4f}'. 
format(index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 3], avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8], avg_cost[index, 9], avg_cost[index, 10], avg_cost[index, 11], avg_cost[index, 12], avg_cost[index, 13], avg_cost[index, 14], avg_cost[index, 15], avg_cost[index, 16], avg_cost[index, 17], avg_cost[index, 18], avg_cost[index, 19], avg_cost[index, 20], avg_cost[index, 21], avg_cost[index, 22], avg_cost[index, 23], epoch_end_time - epoch_start_time)) if "cagrad" in method: torch.save(multi_task_model.state_dict(), f"models/{method}-{opt.weight}-{alpha}-{seed}.pt") elif "sdmgrad" in method: torch.save(multi_task_model.state_dict(), f"models/{method}-{opt.weight}-{alpha}-{seed}-{niter}.pt") else: torch.save(multi_task_model.state_dict(), f"models/{method}-{opt.weight}-{seed}.pt") end_time = time.time() print("Training time: ", end_time - start_time)
31,500
43.242978
130
py
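The gradient-combination routines in utils.py above (graddrop, mgd, pcgrad, cagrad, sdmgrad) all reduce to choosing simplex weights over per-task gradients and writing the weighted sum back into the shared parameters. The following is a minimal, self-contained sketch of the sdmgrad-style update on toy tensors; the gradient matrix, learning rate and projection helper are illustrative stand-ins rather than the repo's exact optimisation loop (which uses momentum SGD and a scale normalisation of the Gram matrix).

# Sketch (not part of the repo): combining per-task gradients with
# simplex-projected weights, mirroring sdmgrad() in utils.py on toy data.
import numpy as np
import torch

def project_to_simplex(v):
    # same algorithm as euclidean_proj_simplex above, with radius 1
    u = np.sort(v)[::-1]
    cssv = np.cumsum(u)
    rho = np.nonzero(u * np.arange(1, len(v) + 1) > (cssv - 1))[0][-1]
    theta = (cssv[rho] - 1) / (rho + 1)
    return np.maximum(v - theta, 0)

torch.manual_seed(0)
grads = torch.randn(10, 3)            # [shared-parameter dim, num_tasks] (toy values)
w = torch.full((3,), 1.0 / 3)         # simplex weights, one per task
alpha, niter, lr = 0.3, 20, 10.0      # mirror the sdmgrad() arguments above

GG = grads.t() @ grads                # Gram matrix of task gradients
Gg = GG.mean(dim=1)
for _ in range(niter):                # projected gradient descent on w
    grad_w = 2 * (GG @ w) + 2 * alpha * Gg
    w = torch.from_numpy(project_to_simplex((w - lr * grad_w).numpy())).float()

g0 = grads.mean(dim=1)                # average gradient
g = (grads @ w + alpha * g0) / (1 + alpha)   # update written back by overwrite_grad
print(w, g.shape)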
sdmgrad
sdmgrad-main/nyuv2/model_segnet_split.py
import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import argparse import torch.utils.data.sampler as sampler from create_dataset import * from utils import * parser = argparse.ArgumentParser(description='Multi-task: Split') parser.add_argument('--type', default='standard', type=str, help='split type: standard, wide, deep') parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa') parser.add_argument('--dataroot', default='nyuv2', type=str, help='dataset root') parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)') parser.add_argument('--seed', default=0, type=int, help='the seed') parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2') opt = parser.parse_args() class SegNet(nn.Module): def __init__(self): super(SegNet, self).__init__() # initialise network parameters if opt.type == 'wide': filter = [64, 128, 256, 512, 1024] else: filter = [64, 128, 256, 512, 512] self.class_nb = 13 # define encoder decoder layers self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])]) self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]])) self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]])) # define convolution layer self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): if i == 0: self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]])) else: self.conv_block_enc.append( nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]), self.conv_layer([filter[i + 1], filter[i + 1]]))) self.conv_block_dec.append( nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]]))) # define task specific layers self.pred_task1 = nn.Sequential( nn.Conv2d(in_channels=filter[0], out_channels=filter[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=filter[0], out_channels=self.class_nb, kernel_size=1, padding=0)) self.pred_task2 = nn.Sequential( nn.Conv2d(in_channels=filter[0], out_channels=filter[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=filter[0], out_channels=1, kernel_size=1, padding=0)) self.pred_task3 = nn.Sequential( nn.Conv2d(in_channels=filter[0], out_channels=filter[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=filter[0], out_channels=3, kernel_size=1, padding=0)) # define pooling and unpooling functions self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True) self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2) self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5])) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) # define convolutional block def conv_layer(self, channel): if opt.type == 'deep': conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[1], 
kernel_size=3, padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True)) return conv_block def forward(self, x): g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5)) for i in range(5): g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2)) # global shared encoder-decoder network for i in range(5): if i == 0: g_encoder[i][0] = self.encoder_block[i](x) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) else: g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1]) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) for i in range(5): if i == 0: g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) else: g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) # define task prediction layers t1_pred = F.log_softmax(self.pred_task1(g_decoder[i][1]), dim=1) t2_pred = self.pred_task2(g_decoder[i][1]) t3_pred = self.pred_task3(g_decoder[i][1]) t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True) return [t1_pred, t2_pred, t3_pred], self.logsigma # control seed torch.backends.cudnn.enabled = False torch.manual_seed(opt.seed) np.random.seed(opt.seed) random.seed(opt.seed) torch.cuda.manual_seed_all(opt.seed) # define model, optimiser and scheduler device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") SegNet_SPLIT = SegNet().to(device) optimizer = optim.Adam(SegNet_SPLIT.parameters(), lr=1e-4) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5) print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_SPLIT), count_parameters(SegNet_SPLIT) / 24981069)) print( 'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30') # define dataset dataset_path = opt.dataroot if opt.apply_augmentation: nyuv2_train_set = NYUv2(root=dataset_path, train=True, augmentation=True) print('Applying data augmentation on NYUv2.') else: nyuv2_train_set = NYUv2(root=dataset_path, train=True) print('Standard training strategy without data augmentation.') nyuv2_test_set = NYUv2(root=dataset_path, train=False) batch_size = 2 nyuv2_train_loader = torch.utils.data.DataLoader(dataset=nyuv2_train_set, batch_size=batch_size, shuffle=True) nyuv2_test_loader = torch.utils.data.DataLoader(dataset=nyuv2_test_set, batch_size=batch_size, shuffle=False) # Train and evaluate multi-task network multi_task_trainer(nyuv2_train_loader, nyuv2_test_loader, SegNet_SPLIT, device, optimizer, scheduler, opt, 200)
7,942
44.649425
119
py
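model_segnet_split.py returns the three task predictions together with the learnable logsigma vector; when --weight is neither 'equal' nor 'dwa', the trainer in utils.py turns that vector into homoscedastic-uncertainty task weights. The snippet below is a sketch of just that weighting step on made-up loss values, not a run of the model itself.

# Sketch (not from the repo): the uncertainty weighting applied in the trainers,
# using a stand-alone logsigma tensor initialised like the model's parameter.
import torch

logsigma = torch.tensor([-0.5, -0.5, -0.5], requires_grad=True)        # as initialised above
task_losses = [torch.tensor(1.2), torch.tensor(0.8), torch.tensor(0.3)]  # toy loss values

total = sum(1 / (2 * torch.exp(logsigma[i])) * task_losses[i] + logsigma[i] / 2
            for i in range(3))
total.backward()          # gradients flow into logsigma as well as the network in practice
print(total.item(), logsigma.grad)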
sdmgrad
sdmgrad-main/nyuv2/min_norm_solvers.py
# This code is from # Multi-Task Learning as Multi-Objective Optimization # Ozan Sener, Vladlen Koltun # Neural Information Processing Systems (NeurIPS) 2018 import numpy as np import torch class MinNormSolver: MAX_ITER = 20 STOP_CRIT = 1e-5 def _min_norm_element_from2(v1v1, v1v2, v2v2): """ Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2 d is the distance (objective) optimzed v1v1 = <x1,x1> v1v2 = <x1,x2> v2v2 = <x2,x2> """ if v1v2 >= v1v1: # Case: Fig 1, third column gamma = 0.999 cost = v1v1 return gamma, cost if v1v2 >= v2v2: # Case: Fig 1, first column gamma = 0.001 cost = v2v2 return gamma, cost # Case: Fig 1, second column gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2)) cost = v2v2 + gamma * (v1v2 - v2v2) return gamma, cost def _min_norm_2d(vecs, dps): """ Find the minimum norm solution as combination of two points This is correct only in 2D ie. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_1 >= 0 for all i, c_i + c_j = 1.0 for some i, j """ dmin = np.inf for i in range(len(vecs)): for j in range(i + 1, len(vecs)): if (i, j) not in dps: dps[(i, j)] = (vecs[i] * vecs[j]).sum().item() dps[(j, i)] = dps[(i, j)] if (i, i) not in dps: dps[(i, i)] = (vecs[i] * vecs[i]).sum().item() if (j, j) not in dps: dps[(j, j)] = (vecs[j] * vecs[j]).sum().item() c, d = MinNormSolver._min_norm_element_from2(dps[(i, i)], dps[(i, j)], dps[(j, j)]) if d < dmin: dmin = d sol = [(i, j), c, d] return sol, dps def _projection2simplex(y): """ Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i """ m = len(y) sorted_y = np.flip(np.sort(y), axis=0) tmpsum = 0.0 tmax_f = (np.sum(y) - 1.0) / m for i in range(m - 1): tmpsum += sorted_y[i] tmax = (tmpsum - 1) / (i + 1.0) if tmax > sorted_y[i + 1]: tmax_f = tmax break return np.maximum(y - tmax_f, np.zeros(y.shape)) def _next_point(cur_val, grad, n): proj_grad = grad - (np.sum(grad) / n) tm1 = -1.0 * cur_val[proj_grad < 0] / proj_grad[proj_grad < 0] tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0]) skippers = np.sum(tm1 < 1e-7) + np.sum(tm2 < 1e-7) t = 1 if len(tm1[tm1 > 1e-7]) > 0: t = np.min(tm1[tm1 > 1e-7]) if len(tm2[tm2 > 1e-7]) > 0: t = min(t, np.min(tm2[tm2 > 1e-7])) next_point = proj_grad * t + cur_val next_point = MinNormSolver._projection2simplex(next_point) return next_point def find_min_norm_element(vecs): """ Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1. 
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j}) Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence """ # Solution lying at the combination of two points dps = {} init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps) n = len(vecs) sol_vec = np.zeros(n) sol_vec[init_sol[0][0]] = init_sol[1] sol_vec[init_sol[0][1]] = 1 - init_sol[1] if n < 3: # This is optimal for n=2, so return the solution return sol_vec, init_sol[2] iter_count = 0 grad_mat = np.zeros((n, n)) for i in range(n): for j in range(n): grad_mat[i, j] = dps[(i, j)] while iter_count < MinNormSolver.MAX_ITER: grad_dir = -1.0 * np.dot(grad_mat, sol_vec) new_point = MinNormSolver._next_point(sol_vec, grad_dir, n) # Re-compute the inner products for line search v1v1 = 0.0 v1v2 = 0.0 v2v2 = 0.0 for i in range(n): for j in range(n): v1v1 += sol_vec[i] * sol_vec[j] * dps[(i, j)] v1v2 += sol_vec[i] * new_point[j] * dps[(i, j)] v2v2 += new_point[i] * new_point[j] * dps[(i, j)] nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2) new_sol_vec = nc * sol_vec + (1 - nc) * new_point change = new_sol_vec - sol_vec if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT: return sol_vec, nd sol_vec = new_sol_vec def find_min_norm_element_FW(vecs): """ Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1. It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j}) Hence, we find the best 2-task solution, and then run the Frank Wolfe until convergence """ # Solution lying at the combination of two points dps = {} init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps) n = len(vecs) sol_vec = np.zeros(n) sol_vec[init_sol[0][0]] = init_sol[1] sol_vec[init_sol[0][1]] = 1 - init_sol[1] if n < 3: # This is optimal for n=2, so return the solution return sol_vec, init_sol[2] iter_count = 0 grad_mat = np.zeros((n, n)) for i in range(n): for j in range(n): grad_mat[i, j] = dps[(i, j)] while iter_count < MinNormSolver.MAX_ITER: t_iter = np.argmin(np.dot(grad_mat, sol_vec)) v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec)) v1v2 = np.dot(sol_vec, grad_mat[:, t_iter]) v2v2 = grad_mat[t_iter, t_iter] nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2) new_sol_vec = nc * sol_vec new_sol_vec[t_iter] += 1 - nc change = new_sol_vec - sol_vec if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT: return sol_vec, nd sol_vec = new_sol_vec def gradient_normalizers(grads, losses, normalization_type): gn = {} if normalization_type == 'l2': for t in grads: gn[t] = np.sqrt(np.sum([gr.pow(2).sum().data[0] for gr in grads[t]])) elif normalization_type == 'loss': for t in grads: gn[t] = losses[t] elif normalization_type == 'loss+': for t in grads: gn[t] = losses[t] * np.sqrt(np.sum([gr.pow(2).sum().data[0] for gr in grads[t]])) elif normalization_type == 'none': for t in grads: gn[t] = 1.0 else: print('ERROR: Invalid Normalization Type') return gn
7,358
35.979899
147
py
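min_norm_solvers.py is consumed by the mgd() branch of utils.py, which hands it one flattened gradient per task and reads back convex-combination weights. Below is a small sketch on three hand-made two-dimensional "gradients"; the values are toys chosen only so the minimum-norm point is easy to verify by hand.

# Sketch (not part of the repo): calling the solver the same way mgd() does.
import torch
from min_norm_solvers import MinNormSolver

g1 = torch.tensor([1.0, 0.0])
g2 = torch.tensor([0.0, 1.0])
g3 = torch.tensor([1.0, 1.0])

sol, min_norm = MinNormSolver.find_min_norm_element([g1, g2, g3])
# sol holds the convex-combination weights; the combined update is their weighted sum
combined = sol[0] * g1 + sol[1] * g2 + sol[2] * g3
print(sol, min_norm, combined)   # expected weights roughly (0.5, 0.5, 0), combined (0.5, 0.5)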
sdmgrad
sdmgrad-main/nyuv2/model_segnet_mtan.py
import numpy as np import random import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import argparse import torch.utils.data.sampler as sampler from create_dataset import * from utils import * parser = argparse.ArgumentParser(description='Multi-task: Attention Network') parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa') parser.add_argument('--dataroot', default='nyuv2', type=str, help='dataset root') parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)') parser.add_argument('--seed', default=0, type=int, help='the seed') parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2') opt = parser.parse_args() class SegNet(nn.Module): def __init__(self): super(SegNet, self).__init__() # initialise network parameters filter = [64, 128, 256, 512, 512] self.class_nb = 13 # define encoder decoder layers self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])]) self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]])) self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]])) # define convolution layer self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): if i == 0: self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]])) else: self.conv_block_enc.append( nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]), self.conv_layer([filter[i + 1], filter[i + 1]]))) self.conv_block_dec.append( nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]]))) # define task attention layers self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])]) self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])]) self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])]) self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for j in range(3): if j < 2: self.encoder_att.append(nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])) self.decoder_att.append(nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])) for i in range(4): self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]])) self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]])) for i in range(4): if i < 3: self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]])) self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]])) else: self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.pred_task1 = self.conv_layer([filter[0], self.class_nb], pred=True) self.pred_task2 = self.conv_layer([filter[0], 1], pred=True) self.pred_task3 = self.conv_layer([filter[0], 3], pred=True) # define pooling and unpooling functions self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True) self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2) self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5])) 
for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def conv_layer(self, channel, pred=False): if not pred: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), ) return conv_block def att_layer(self, channel): att_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), nn.BatchNorm2d(channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0), nn.BatchNorm2d(channel[2]), nn.Sigmoid(), ) return att_block def forward(self, x): g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5)) for i in range(5): g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2)) # define attention list for tasks atten_encoder, atten_decoder = ([0] * 3 for _ in range(2)) for i in range(3): atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2)) for i in range(3): for j in range(5): atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2)) # define global shared network for i in range(5): if i == 0: g_encoder[i][0] = self.encoder_block[i](x) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) else: g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1]) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) for i in range(5): if i == 0: g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) else: g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) # define task dependent attention module for i in range(3): for j in range(5): if j == 0: atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0]) atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1] atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1]) atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2) else: atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat( (g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1)) atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1] atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1]) atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2) for j in range(5): if j == 0: atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1], scale_factor=2, mode='bilinear', align_corners=True) atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0]) atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat( (g_upsampl[j], atten_decoder[i][j][0]), dim=1)) atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1] else: 
atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2], scale_factor=2, mode='bilinear', align_corners=True) atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0]) atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat( (g_upsampl[j], atten_decoder[i][j][0]), dim=1)) atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1] # define task prediction layers t1_pred = F.log_softmax(self.pred_task1(atten_decoder[0][-1][-1]), dim=1) t2_pred = self.pred_task2(atten_decoder[1][-1][-1]) t3_pred = self.pred_task3(atten_decoder[2][-1][-1]) t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True) return [t1_pred, t2_pred, t3_pred], self.logsigma # control seed torch.backends.cudnn.enabled = False torch.manual_seed(opt.seed) np.random.seed(opt.seed) random.seed(opt.seed) torch.cuda.manual_seed_all(opt.seed) # define model, optimiser and scheduler device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") SegNet_MTAN = SegNet().to(device) optimizer = optim.Adam(SegNet_MTAN.parameters(), lr=1e-4) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5) print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_MTAN), count_parameters(SegNet_MTAN) / 24981069)) print( 'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30') # define dataset dataset_path = opt.dataroot if opt.apply_augmentation: nyuv2_train_set = NYUv2(root=dataset_path, train=True, augmentation=True) print('Applying data augmentation on NYUv2.') else: nyuv2_train_set = NYUv2(root=dataset_path, train=True) print('Standard training strategy without data augmentation.') nyuv2_test_set = NYUv2(root=dataset_path, train=False) batch_size = 2 nyuv2_train_loader = torch.utils.data.DataLoader(dataset=nyuv2_train_set, batch_size=batch_size, shuffle=True) nyuv2_test_loader = torch.utils.data.DataLoader(dataset=nyuv2_test_set, batch_size=batch_size, shuffle=False) # Train and evaluate multi-task network multi_task_trainer(nyuv2_train_loader, nyuv2_test_loader, SegNet_MTAN, device, optimizer, scheduler, opt, 200)
11,617
49.077586
119
py
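The MTAN model above is normally trained with multi_task_trainer, whose --weight dwa option rescales the three losses by their relative rate of descent. The snippet below is a sketch of that Dynamic Weight Average rule on invented epoch-average losses; T plays the role of the --temp argument.

# Sketch (not from the repo): the DWA weights computed inside the trainers,
# shown on made-up average task losses from two consecutive epochs.
import numpy as np

prev_losses = np.array([1.00, 0.90, 0.50])   # epoch k-2 average task losses (toy)
curr_losses = np.array([0.80, 0.85, 0.45])   # epoch k-1 average task losses (toy)
T = 2.0

w = curr_losses / prev_losses                          # relative descent rate per task
lam = 3 * np.exp(w / T) / np.sum(np.exp(w / T))        # weights sum to the number of tasks
print(lam, lam.sum())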
sdmgrad
sdmgrad-main/nyuv2/create_dataset.py
from torch.utils.data.dataset import Dataset import os import torch import torch.nn.functional as F import fnmatch import numpy as np import random class RandomScaleCrop(object): """ Credit to Jialong Wu from https://github.com/lorenmt/mtan/issues/34. """ def __init__(self, scale=[1.0, 1.2, 1.5]): self.scale = scale def __call__(self, img, label, depth, normal): height, width = img.shape[-2:] sc = self.scale[random.randint(0, len(self.scale) - 1)] h, w = int(height / sc), int(width / sc) i = random.randint(0, height - h) j = random.randint(0, width - w) img_ = F.interpolate(img[None, :, i:i + h, j:j + w], size=(height, width), mode='bilinear', align_corners=True).squeeze(0) label_ = F.interpolate(label[None, None, i:i + h, j:j + w], size=(height, width), mode='nearest').squeeze(0).squeeze(0) depth_ = F.interpolate(depth[None, :, i:i + h, j:j + w], size=(height, width), mode='nearest').squeeze(0) normal_ = F.interpolate(normal[None, :, i:i + h, j:j + w], size=(height, width), mode='bilinear', align_corners=True).squeeze(0) return img_, label_, depth_ / sc, normal_ class NYUv2(Dataset): """ We could further improve the performance with the data augmentation of NYUv2 defined in: [1] PAD-Net: Multi-Tasks Guided Prediction-and-Distillation Network for Simultaneous Depth Estimation and Scene Parsing [2] Pattern affinitive propagation across depth, surface normal and semantic segmentation [3] Mti-net: Multiscale task interaction networks for multi-task learning 1. Random scale in a selected raio 1.0, 1.2, and 1.5. 2. Random horizontal flip. Please note that: all baselines and MTAN did NOT apply data augmentation in the original paper. """ def __init__(self, root, train=True, augmentation=False): self.train = train self.root = os.path.expanduser(root) self.augmentation = augmentation # read the data file if train: self.data_path = root + '/train' else: self.data_path = root + '/val' # calculate data length self.data_len = len(fnmatch.filter(os.listdir(self.data_path + '/image'), '*.npy')) def __getitem__(self, index): # load data from the pre-processed npy files image = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/image/{:d}.npy'.format(index)), -1, 0)) semantic = torch.from_numpy(np.load(self.data_path + '/label/{:d}.npy'.format(index))) depth = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/depth/{:d}.npy'.format(index)), -1, 0)) normal = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/normal/{:d}.npy'.format(index)), -1, 0)) # apply data augmentation if required if self.augmentation: image, semantic, depth, normal = RandomScaleCrop()(image, semantic, depth, normal) if torch.rand(1) < 0.5: image = torch.flip(image, dims=[2]) semantic = torch.flip(semantic, dims=[1]) depth = torch.flip(depth, dims=[2]) normal = torch.flip(normal, dims=[2]) normal[0, :, :] = -normal[0, :, :] return image.float(), semantic.float(), depth.float(), normal.float() def __len__(self): return self.data_len
3,568
40.988235
127
py
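create_dataset.py expects a pre-processed NYUv2 folder with per-index .npy files under image/, label/, depth/ and normal/. The sketch below shows how the training scripts wire it into a DataLoader; the 'nyuv2' path is an assumption and must point at such a folder for the code to actually run.

# Sketch (not from the repo): building the dataset and loader as the scripts do.
import torch
from create_dataset import NYUv2

train_set = NYUv2(root='nyuv2', train=True, augmentation=True)   # path is an assumption
loader = torch.utils.data.DataLoader(train_set, batch_size=2, shuffle=True)

image, semantic, depth, normal = next(iter(loader))
# image: [B, 3, H, W], semantic: [B, H, W], depth: [B, 1, H, W], normal: [B, 3, H, W]
print(image.shape, semantic.shape, depth.shape, normal.shape)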
sdmgrad
sdmgrad-main/nyuv2/model_segnet_cross.py
import numpy as np import random import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import argparse import torch.utils.data.sampler as sampler from create_dataset import * from utils import * parser = argparse.ArgumentParser(description='Multi-task: Cross') parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa') parser.add_argument('--dataroot', default='nyuv2', type=str, help='dataset root') parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)') parser.add_argument('--seed', default=0, type=int, help='the seed') parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2') opt = parser.parse_args() class SegNet(nn.Module): def __init__(self): super(SegNet, self).__init__() # initialise network parameters filter = [64, 128, 256, 512, 512] self.class_nb = 13 # define encoder decoder layers self.encoder_block_t = nn.ModuleList( [nn.ModuleList([self.conv_layer([3, filter[0], filter[0]], bottle_neck=True)])]) self.decoder_block_t = nn.ModuleList( [nn.ModuleList([self.conv_layer([filter[0], filter[0], filter[0]], bottle_neck=True)])]) for j in range(3): if j < 2: self.encoder_block_t.append( nn.ModuleList([self.conv_layer([3, filter[0], filter[0]], bottle_neck=True)])) self.decoder_block_t.append( nn.ModuleList([self.conv_layer([filter[0], filter[0], filter[0]], bottle_neck=True)])) for i in range(4): if i == 0: self.encoder_block_t[j].append( self.conv_layer([filter[i], filter[i + 1], filter[i + 1]], bottle_neck=True)) self.decoder_block_t[j].append( self.conv_layer([filter[i + 1], filter[i], filter[i]], bottle_neck=True)) else: self.encoder_block_t[j].append( self.conv_layer([filter[i], filter[i + 1], filter[i + 1]], bottle_neck=False)) self.decoder_block_t[j].append( self.conv_layer([filter[i + 1], filter[i], filter[i]], bottle_neck=False)) # define cross-stitch units self.cs_unit_encoder = nn.Parameter(data=torch.ones(4, 3)) self.cs_unit_decoder = nn.Parameter(data=torch.ones(5, 3)) # define task specific layers self.pred_task1 = self.conv_layer([filter[0], self.class_nb], bottle_neck=True, pred_layer=True) self.pred_task2 = self.conv_layer([filter[0], 1], bottle_neck=True, pred_layer=True) self.pred_task3 = self.conv_layer([filter[0], 3], bottle_neck=True, pred_layer=True) # define pooling and unpooling functions self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True) self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2) self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5])) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Parameter): nn.init.constant(m.weight, 1) def conv_layer(self, channel, bottle_neck, pred_layer=False): if bottle_neck: if not pred_layer: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=3, padding=1), nn.BatchNorm2d(channel[2]), nn.ReLU(inplace=True), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1), 
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=3, padding=1), nn.BatchNorm2d(channel[2]), nn.ReLU(inplace=True), ) return conv_block def forward(self, x): encoder_conv_t, decoder_conv_t, encoder_samp_t, decoder_samp_t, indices_t = ([0] * 3 for _ in range(5)) for i in range(3): encoder_conv_t[i], decoder_conv_t[i], encoder_samp_t[i], decoder_samp_t[i], indices_t[i] = ( [0] * 5 for _ in range(5)) # task branch 1 for i in range(5): for j in range(3): if i == 0: encoder_conv_t[j][i] = self.encoder_block_t[j][i](x) encoder_samp_t[j][i], indices_t[j][i] = self.down_sampling(encoder_conv_t[j][i]) else: encoder_cross_stitch = self.cs_unit_encoder[i - 1][0] * encoder_samp_t[0][i - 1] + \ self.cs_unit_encoder[i - 1][1] * encoder_samp_t[1][i - 1] + \ self.cs_unit_encoder[i - 1][2] * encoder_samp_t[2][i - 1] encoder_conv_t[j][i] = self.encoder_block_t[j][i](encoder_cross_stitch) encoder_samp_t[j][i], indices_t[j][i] = self.down_sampling(encoder_conv_t[j][i]) for i in range(5): for j in range(3): if i == 0: decoder_cross_stitch = self.cs_unit_decoder[i][0] * encoder_samp_t[0][-1] + \ self.cs_unit_decoder[i][1] * encoder_samp_t[1][-1] + \ self.cs_unit_decoder[i][2] * encoder_samp_t[2][-1] decoder_samp_t[j][i] = self.up_sampling(decoder_cross_stitch, indices_t[j][-i - 1]) decoder_conv_t[j][i] = self.decoder_block_t[j][-i - 1](decoder_samp_t[j][i]) else: decoder_cross_stitch = self.cs_unit_decoder[i][0] * decoder_conv_t[0][i - 1] + \ self.cs_unit_decoder[i][1] * decoder_conv_t[1][i - 1] + \ self.cs_unit_decoder[i][2] * decoder_conv_t[2][i - 1] decoder_samp_t[j][i] = self.up_sampling(decoder_cross_stitch, indices_t[j][-i - 1]) decoder_conv_t[j][i] = self.decoder_block_t[j][-i - 1](decoder_samp_t[j][i]) # define task prediction layers t1_pred = F.log_softmax(self.pred_task1(decoder_conv_t[0][-1]), dim=1) t2_pred = self.pred_task2(decoder_conv_t[1][-1]) t3_pred = self.pred_task3(decoder_conv_t[2][-1]) t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True) return [t1_pred, t2_pred, t3_pred], self.logsigma # control seed torch.backends.cudnn.enabled = False torch.manual_seed(opt.seed) np.random.seed(opt.seed) random.seed(opt.seed) torch.cuda.manual_seed_all(opt.seed) # define model, optimiser and scheduler device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") SegNet_CROSS = SegNet().to(device) optimizer = optim.Adam(SegNet_CROSS.parameters(), lr=1e-4) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5) print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_CROSS), count_parameters(SegNet_CROSS) / 24981069)) print( 'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30') # define dataset dataset_path = opt.dataroot if opt.apply_augmentation: nyuv2_train_set = NYUv2(root=dataset_path, train=True, augmentation=True) print('Applying data augmentation on NYUv2.') else: nyuv2_train_set = NYUv2(root=dataset_path, train=True) print('Standard training strategy without data augmentation.') nyuv2_test_set = NYUv2(root=dataset_path, train=False) batch_size = 2 
nyuv2_train_loader = torch.utils.data.DataLoader(dataset=nyuv2_train_set, batch_size=batch_size, shuffle=True) nyuv2_test_loader = torch.utils.data.DataLoader(dataset=nyuv2_test_set, batch_size=batch_size, shuffle=False) # Train and evaluate multi-task network multi_task_trainer(nyuv2_train_loader, nyuv2_test_loader, SegNet_CROSS, device, optimizer, scheduler, opt, 200)
9,335
47.879581
119
py
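The cross-stitch network above mixes the three task branches with the learnable cs_unit_encoder and cs_unit_decoder parameters. Below is a sketch of that mixing step in isolation, on toy feature maps; it mirrors a single encoder stage of SegNet.forward rather than reproducing the full model.

# Sketch (not part of the repo): one cross-stitch mixing step on toy features.
import torch

feat = [torch.randn(1, 64, 8, 8) for _ in range(3)]   # per-task feature maps (toy)
cs_unit = torch.nn.Parameter(torch.ones(3))           # one row of cs_unit_encoder

# each task's previous feature is scaled by a learnable coefficient and summed,
# and the mixed tensor is fed to the next task-specific block
mixed = cs_unit[0] * feat[0] + cs_unit[1] * feat[1] + cs_unit[2] * feat[2]
print(mixed.shape)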
sdmgrad
sdmgrad-main/nyuv2/model_segnet_mt.py
import numpy as np import random import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import argparse import torch.utils.data.sampler as sampler from create_dataset import * from utils import * parser = argparse.ArgumentParser(description='Multi-task: Split') parser.add_argument('--type', default='standard', type=str, help='split type: standard, wide, deep') parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa') parser.add_argument('--dataroot', default='nyuv2', type=str, help='dataset root') parser.add_argument('--method', default='sdmgrad', type=str, help='optimization method') parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)') parser.add_argument('--alpha', default=0.3, type=float, help='the alpha') parser.add_argument('--lr', default=1e-4, type=float, help='the learning rate') parser.add_argument('--seed', default=1, type=int, help='the seed') parser.add_argument('--niter', default=20, type=int, help='number of inner iteration') parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2') opt = parser.parse_args() class SegNet(nn.Module): def __init__(self): super(SegNet, self).__init__() # initialise network parameters filter = [64, 128, 256, 512, 512] self.class_nb = 13 # define encoder decoder layers self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])]) self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]])) self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]])) # define convolution layer self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): if i == 0: self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]])) else: self.conv_block_enc.append( nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]), self.conv_layer([filter[i + 1], filter[i + 1]]))) self.conv_block_dec.append( nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]]))) # define task attention layers self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])]) self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])]) self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])]) self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for j in range(3): if j < 2: self.encoder_att.append(nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])) self.decoder_att.append(nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])) for i in range(4): self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]])) self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]])) for i in range(4): if i < 3: self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]])) self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]])) else: self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.pred_task1 = 
self.conv_layer([filter[0], self.class_nb], pred=True) self.pred_task2 = self.conv_layer([filter[0], 1], pred=True) self.pred_task3 = self.conv_layer([filter[0], 3], pred=True) # define pooling and unpooling functions self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True) self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2) self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5])) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def shared_modules(self): return [ self.encoder_block, self.decoder_block, self.conv_block_enc, self.conv_block_dec, #self.encoder_att, self.decoder_att, self.encoder_block_att, self.decoder_block_att, self.down_sampling, self.up_sampling ] def zero_grad_shared_modules(self): for mm in self.shared_modules(): mm.zero_grad() def conv_layer(self, channel, pred=False): if not pred: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), ) return conv_block def att_layer(self, channel): att_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), nn.BatchNorm2d(channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0), nn.BatchNorm2d(channel[2]), nn.Sigmoid(), ) return att_block def forward(self, x): g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5)) for i in range(5): g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2)) # define attention list for tasks atten_encoder, atten_decoder = ([0] * 3 for _ in range(2)) for i in range(3): atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2)) for i in range(3): for j in range(5): atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2)) # define global shared network for i in range(5): if i == 0: g_encoder[i][0] = self.encoder_block[i](x) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) else: g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1]) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) for i in range(5): if i == 0: g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) else: g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) # define task dependent attention module for i in range(3): for j in range(5): if j == 0: atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0]) atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1] atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1]) atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2) else: 
atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat( (g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1)) atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1] atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1]) atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2) for j in range(5): if j == 0: atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1], scale_factor=2, mode='bilinear', align_corners=True) atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0]) atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat( (g_upsampl[j], atten_decoder[i][j][0]), dim=1)) atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1] else: atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2], scale_factor=2, mode='bilinear', align_corners=True) atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0]) atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat( (g_upsampl[j], atten_decoder[i][j][0]), dim=1)) atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1] # define task prediction layers t1_pred = F.log_softmax(self.pred_task1(atten_decoder[0][-1][-1]), dim=1) t2_pred = self.pred_task2(atten_decoder[1][-1][-1]) t3_pred = self.pred_task3(atten_decoder[2][-1][-1]) t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True) return [t1_pred, t2_pred, t3_pred], self.logsigma class SegNetSplit(nn.Module): def __init__(self): super(SegNetSplit, self).__init__() # initialise network parameters if opt.type == 'wide': filter = [64, 128, 256, 512, 1024] else: filter = [64, 128, 256, 512, 512] self.class_nb = 13 # define encoder decoder layers self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])]) self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]])) self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]])) # define convolution layer self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): if i == 0: self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]])) else: self.conv_block_enc.append( nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]), self.conv_layer([filter[i + 1], filter[i + 1]]))) self.conv_block_dec.append( nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]]))) # define task specific layers self.pred_task1 = nn.Sequential( nn.Conv2d(in_channels=filter[0], out_channels=filter[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=filter[0], out_channels=self.class_nb, kernel_size=1, padding=0)) self.pred_task2 = nn.Sequential( nn.Conv2d(in_channels=filter[0], out_channels=filter[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=filter[0], out_channels=1, kernel_size=1, padding=0)) self.pred_task3 = nn.Sequential( nn.Conv2d(in_channels=filter[0], out_channels=filter[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=filter[0], out_channels=3, kernel_size=1, padding=0)) # define pooling and unpooling functions self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True) self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2) self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5])) for m in 
self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) # define convolutional block def conv_layer(self, channel): if opt.type == 'deep': conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True)) return conv_block def forward(self, x): g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5)) for i in range(5): g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2)) # global shared encoder-decoder network for i in range(5): if i == 0: g_encoder[i][0] = self.encoder_block[i](x) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) else: g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1]) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) for i in range(5): if i == 0: g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) else: g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) # define task prediction layers t1_pred = F.log_softmax(self.pred_task1(g_decoder[i][1]), dim=1) t2_pred = self.pred_task2(g_decoder[i][1]) t3_pred = self.pred_task3(g_decoder[i][1]) t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True) return [t1_pred, t2_pred, t3_pred], self.logsigma # control seed torch.backends.cudnn.enabled = False torch.manual_seed(opt.seed) np.random.seed(opt.seed) random.seed(opt.seed) torch.cuda.manual_seed_all(opt.seed) # define model, optimiser and scheduler device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") SegNet_MTAN = SegNet().to(device) optimizer = optim.Adam(SegNet_MTAN.parameters(), lr=opt.lr) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5) print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_MTAN), count_parameters(SegNet_MTAN) / 24981069)) print( 'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30') # define dataset dataset_path = opt.dataroot if opt.apply_augmentation: nyuv2_train_set = NYUv2(root=dataset_path, train=True, augmentation=True) print('Applying data augmentation on NYUv2.') else: nyuv2_train_set = NYUv2(root=dataset_path, train=True) print('Standard training strategy without data augmentation.') nyuv2_test_set = NYUv2(root=dataset_path, train=False) batch_size = 2 nyuv2_train_loader = torch.utils.data.DataLoader(dataset=nyuv2_train_set, batch_size=batch_size, shuffle=True) nyuv2_test_loader = torch.utils.data.DataLoader(dataset=nyuv2_test_set, batch_size=batch_size, shuffle=False) # Train and 
evaluate multi-task network multi_task_mgd_trainer(nyuv2_train_loader, nyuv2_test_loader, SegNet_MTAN, device, optimizer, scheduler, opt, 200, opt.method, opt.alpha, opt.seed)
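For readers unfamiliar with the --weight dwa option parsed above, the following is a minimal, self-contained sketch of the Dynamic Weight Average rule it selects (the weighting itself is applied inside the trainer imported from utils, not in this file); the helper name dwa_weights and the loss_history layout are illustrative, not part of the repository.

import numpy as np

def dwa_weights(loss_history, epoch, T=2.0):
    # loss_history[k, e] is task k's average training loss at epoch e;
    # T is the temperature passed via --temp.
    n_tasks = loss_history.shape[0]
    if epoch < 2:
        return np.ones(n_tasks)            # first two epochs: equal weights
    w = loss_history[:, epoch - 1] / loss_history[:, epoch - 2]
    return n_tasks * np.exp(w / T) / np.sum(np.exp(w / T))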
18,041
48.027174
119
py
sdmgrad
sdmgrad-main/consistency/model_resnet.py
# resnet18 base model for Pareto MTL import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.loss import CrossEntropyLoss from torchvision import models class RegressionTrainResNet(torch.nn.Module): def __init__(self, model, init_weight): super(RegressionTrainResNet, self).__init__() self.model = model self.weights = torch.nn.Parameter(torch.from_numpy(init_weight).float()) self.ce_loss = CrossEntropyLoss() def forward(self, x, ts): n_tasks = 2 ys = self.model(x) task_loss = [] for i in range(n_tasks): task_loss.append(self.ce_loss(ys[:, i], ts[:, i])) task_loss = torch.stack(task_loss) return task_loss class MnistResNet(torch.nn.Module): def __init__(self, n_tasks): super(MnistResNet, self).__init__() self.n_tasks = n_tasks self.feature_extractor = models.resnet18(pretrained=False) self.feature_extractor.conv1 = torch.nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) fc_in_features = self.feature_extractor.fc.in_features self.feature_extractor.fc = torch.nn.Linear(fc_in_features, 100) self.ce_loss = CrossEntropyLoss() for i in range(self.n_tasks): setattr(self, 'task_{}'.format(i), nn.Linear(100, 10)) def shared_modules(self): return [self.feature_extractor] def zero_grad_shared_modules(self): for mm in self.shared_modules(): mm.zero_grad() def forward(self, x): x = F.relu(self.feature_extractor(x)) outs = [] for i in range(self.n_tasks): layer = getattr(self, 'task_{}'.format(i)) outs.append(layer(x)) return torch.stack(outs, dim=1) def forward_loss(self, x, ts): ys = self.forward(x) task_loss = [] for i in range(self.n_tasks): task_loss.append(self.ce_loss(ys[:, i], ts[:, i])) task_loss = torch.stack(task_loss) return task_loss
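A brief usage sketch of MnistResNet above (illustrative; assumes the script runs from the consistency directory so model_resnet is importable): random tensors stand in for a Multi-MNIST batch of 1x36x36 images with one label per task.

import torch
from model_resnet import MnistResNet

model = MnistResNet(n_tasks=2)
x = torch.randn(8, 1, 36, 36)          # dummy two-digit MNIST batch
ts = torch.randint(0, 10, (8, 2))      # one class label per task
logits = model(x)                      # shape [8, 2, 10]
losses = model.forward_loss(x, ts)     # per-task cross-entropy, shape [2]
print(logits.shape, losses.shape)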
2,346
31.150685
80
py
sdmgrad
sdmgrad-main/consistency/utils.py
import numpy as np from min_norm_solvers import MinNormSolver from scipy.optimize import minimize, Bounds, minimize_scalar import torch from torch import linalg as LA from torch.nn import functional as F def euclidean_proj_simplex(v, s=1): """ Compute the Euclidean projection on a positive simplex Solves the optimisation problem (using the algorithm from [1]): min_w 0.5 * || w - v ||_2^2 , s.t. \sum_i w_i = s, w_i >= 0 Parameters ---------- v: (n,) numpy array, n-dimensional vector to project s: int, optional, default: 1, radius of the simplex Returns ------- w: (n,) numpy array, Euclidean projection of v on the simplex Notes ----- The complexity of this algorithm is in O(n log(n)) as it involves sorting v. Better alternatives exist for high-dimensional sparse vectors (cf. [1]) However, this implementation still easily scales to millions of dimensions. References ---------- [1] Efficient Projections onto the .1-Ball for Learning in High Dimensions John Duchi, Shai Shalev-Shwartz, Yoram Singer, and Tushar Chandra. International Conference on Machine Learning (ICML 2008) http://www.cs.berkeley.edu/~jduchi/projects/DuchiSiShCh08.pdf [2] Projection onto the probability simplex: An efficient algorithm with a simple proof, and an application Weiran Wang, Miguel Á. Carreira-Perpiñán. arXiv:1309.1541 https://arxiv.org/pdf/1309.1541.pdf [3] https://gist.github.com/daien/1272551/edd95a6154106f8e28209a1c7964623ef8397246#file-simplex_projection-py """ assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s v = v.astype(np.float64) n, = v.shape # will raise ValueError if v is not 1-D # check if we are already on the simplex if v.sum() == s and np.alltrue(v >= 0): # best projection: itself! return v # get the array of cumulative sums of a sorted (decreasing) copy of v u = np.sort(v)[::-1] cssv = np.cumsum(u) # get the number of > 0 components of the optimal solution rho = np.nonzero(u * np.arange(1, n + 1) > (cssv - s))[0][-1] # compute the Lagrange multiplier associated to the simplex constraint theta = float(cssv[rho] - s) / (rho + 1) # compute the projection by thresholding v using theta w = (v - theta).clip(min=0) return w def grad2vec(m, grads, grad_dims, task): # store the gradients grads[:, task].fill_(0.0) cnt = 0 for mm in m.shared_modules(): for p in mm.parameters(): grad = p.grad if grad is not None: grad_cur = grad.data.detach().clone() beg = 0 if cnt == 0 else sum(grad_dims[:cnt]) en = sum(grad_dims[:cnt + 1]) grads[beg:en, task].copy_(grad_cur.data.view(-1)) cnt += 1 def overwrite_grad(m, newgrad, grad_dims): # newgrad = newgrad * 2 # to match the sum loss cnt = 0 for mm in m.shared_modules(): for param in mm.parameters(): beg = 0 if cnt == 0 else sum(grad_dims[:cnt]) en = sum(grad_dims[:cnt + 1]) this_grad = newgrad[beg:en].contiguous().view(param.data.size()) param.grad = this_grad.data.clone() cnt += 1 def mean_grad(grads): return grads.mean(1) def mgd(grads): grads_cpu = grads.t().cpu() sol, min_norm = MinNormSolver.find_min_norm_element([grads_cpu[t] for t in range(grads.shape[-1])]) w = torch.FloatTensor(sol).to(grads.device) g = grads.mm(w.view(-1, 1)).view(-1) return g def cagrad(grads, alpha=0.5, rescale=0): g1 = grads[:, 0] g2 = grads[:, 1] g11 = g1.dot(g1).item() g12 = g1.dot(g2).item() g22 = g2.dot(g2).item() g0_norm = 0.5 * np.sqrt(g11 + g22 + 2 * g12) # want to minimize g_w^Tg_0 + c*||g_0||*||g_w|| coef = alpha * g0_norm def obj(x): # g_w^T g_0: x*0.5*(g11+g22-2g12)+(0.5+x)*(g12-g22)+g22 # g_w^T g_w: x^2*(g11+g22-2g12)+2*x*(g12-g22)+g22 return coef * np.sqrt(x**2 * 
(g11 + g22 - 2 * g12) + 2 * x * (g12 - g22) + g22 + 1e-8) + 0.5 * x * (g11 + g22 - 2 * g12) + (0.5 + x) * (g12 - g22) + g22 res = minimize_scalar(obj, bounds=(0, 1), method='bounded') x = res.x gw_norm = np.sqrt(x**2 * g11 + (1 - x)**2 * g22 + 2 * x * (1 - x) * g12 + 1e-8) lmbda = coef / (gw_norm + 1e-8) g = (0.5 + lmbda * x) * g1 + (0.5 + lmbda * (1 - x)) * g2 # g0 + lmbda*gw if rescale == 0: return g elif rescale == 1: return g / (1 + alpha**2) else: return g / (1 + alpha) def sdmgrad(w, grads, lmbda, niter=20): """ our proposed sdmgrad """ GG = torch.mm(grads.t(), grads) scale = torch.mean(torch.sqrt(torch.diag(GG) + 1e-4)) GG = GG / scale.pow(2) Gg = torch.mean(GG, dim=1) gg = torch.mean(Gg) w.requires_grad = True optimizer = torch.optim.SGD([w], lr=10, momentum=0.5) for i in range(niter): optimizer.zero_grad() obj = torch.dot(w, torch.mv(GG, w)) + 2 * lmbda * torch.dot(w, Gg) + lmbda**2 * gg obj.backward() optimizer.step() proj = euclidean_proj_simplex(w.data.cpu().numpy()) w.data.copy_(torch.from_numpy(proj).data) w.requires_grad = False g0 = torch.mean(grads, dim=1) gw = torch.mv(grads, w) g = (gw + lmbda * g0) / (1 + lmbda) return g
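A quick, illustrative check of euclidean_proj_simplex defined above (assuming this consistency/utils.py is on the import path): the projection clips and rescales so the result is a valid probability vector.

import numpy as np
from utils import euclidean_proj_simplex

v = np.array([0.8, 0.6, -0.2])
w = euclidean_proj_simplex(v)
print(w, w.sum())    # [0.6 0.4 0. ] 1.0 -- non-negative entries summing to 1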
5,435
34.070968
113
py
sdmgrad
sdmgrad-main/consistency/model_lenet.py
# lenet base model for Pareto MTL import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.loss import CrossEntropyLoss class RegressionTrain(torch.nn.Module): def __init__(self, model, init_weight): super(RegressionTrain, self).__init__() self.model = model self.weights = torch.nn.Parameter(torch.from_numpy(init_weight).float()) self.ce_loss = CrossEntropyLoss() def forward(self, x, ts): n_tasks = 2 ys = self.model(x) task_loss = [] for i in range(n_tasks): task_loss.append(self.ce_loss(ys[:, i], ts[:, i])) task_loss = torch.stack(task_loss) return task_loss class RegressionModel(torch.nn.Module): def __init__(self, n_tasks): super(RegressionModel, self).__init__() self.n_tasks = n_tasks self.conv1 = nn.Conv2d(1, 10, 9, 1) self.conv2 = nn.Conv2d(10, 20, 5, 1) self.fc1 = nn.Linear(5 * 5 * 20, 50) self.ce_loss = CrossEntropyLoss() for i in range(self.n_tasks): setattr(self, 'task_{}'.format(i), nn.Linear(50, 10)) def shared_modules(self): return [self.conv1, self.conv2, self.fc1] def zero_grad_shared_modules(self): for mm in self.shared_modules(): mm.zero_grad() def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2, 2) x = x.view(-1, 5 * 5 * 20) x = F.relu(self.fc1(x)) outs = [] for i in range(self.n_tasks): layer = getattr(self, 'task_{}'.format(i)) outs.append(layer(x)) return torch.stack(outs, dim=1) def forward_loss(self, x, ts): ys = self.forward(x) task_loss = [] for i in range(self.n_tasks): task_loss.append(self.ce_loss(ys[:, i], ts[:, i])) task_loss = torch.stack(task_loss) return task_loss
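An illustrative shape trace for RegressionModel above (assuming it is importable as model_lenet), showing why fc1 expects 5 * 5 * 20 input features for the 1x36x36 Multi-MNIST images used in train.py.

import torch
from model_lenet import RegressionModel

model = RegressionModel(n_tasks=2)
x = torch.randn(4, 1, 36, 36)
print(model(x).shape)    # torch.Size([4, 2, 10])
# spatial size: 36 -> conv 9x9 -> 28 -> maxpool/2 -> 14 -> conv 5x5 -> 10 -> maxpool/2 -> 5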
2,006
26.875
80
py
sdmgrad
sdmgrad-main/consistency/min_norm_solvers.py
# This code is from # Multi-Task Learning as Multi-Objective Optimization # Ozan Sener, Vladlen Koltun # Neural Information Processing Systems (NeurIPS) 2018 import numpy as np import torch class MinNormSolver: MAX_ITER = 20 STOP_CRIT = 1e-5 def _min_norm_element_from2(v1v1, v1v2, v2v2): """ Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2 d is the distance (objective) optimzed v1v1 = <x1,x1> v1v2 = <x1,x2> v2v2 = <x2,x2> """ if v1v2 >= v1v1: # Case: Fig 1, third column gamma = 0.999 cost = v1v1 return gamma, cost if v1v2 >= v2v2: # Case: Fig 1, first column gamma = 0.001 cost = v2v2 return gamma, cost # Case: Fig 1, second column gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2)) cost = v2v2 + gamma * (v1v2 - v2v2) return gamma, cost def _min_norm_2d(vecs, dps): """ Find the minimum norm solution as combination of two points This is correct only in 2D ie. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_1 >= 0 for all i, c_i + c_j = 1.0 for some i, j """ dmin = np.inf for i in range(len(vecs)): for j in range(i + 1, len(vecs)): if (i, j) not in dps: dps[(i, j)] = (vecs[i] * vecs[j]).sum().item() dps[(j, i)] = dps[(i, j)] if (i, i) not in dps: dps[(i, i)] = (vecs[i] * vecs[i]).sum().item() if (j, j) not in dps: dps[(j, j)] = (vecs[j] * vecs[j]).sum().item() c, d = MinNormSolver._min_norm_element_from2(dps[(i, i)], dps[(i, j)], dps[(j, j)]) if d < dmin: dmin = d sol = [(i, j), c, d] return sol, dps def _projection2simplex(y): """ Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i """ m = len(y) sorted_y = np.flip(np.sort(y), axis=0) tmpsum = 0.0 tmax_f = (np.sum(y) - 1.0) / m for i in range(m - 1): tmpsum += sorted_y[i] tmax = (tmpsum - 1) / (i + 1.0) if tmax > sorted_y[i + 1]: tmax_f = tmax break return np.maximum(y - tmax_f, np.zeros(y.shape)) def _next_point(cur_val, grad, n): proj_grad = grad - (np.sum(grad) / n) tm1 = -1.0 * cur_val[proj_grad < 0] / proj_grad[proj_grad < 0] tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0]) skippers = np.sum(tm1 < 1e-7) + np.sum(tm2 < 1e-7) t = 1 if len(tm1[tm1 > 1e-7]) > 0: t = np.min(tm1[tm1 > 1e-7]) if len(tm2[tm2 > 1e-7]) > 0: t = min(t, np.min(tm2[tm2 > 1e-7])) next_point = proj_grad * t + cur_val next_point = MinNormSolver._projection2simplex(next_point) return next_point def find_min_norm_element(vecs): """ Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1. 
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j}) Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence """ # Solution lying at the combination of two points dps = {} init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps) n = len(vecs) sol_vec = np.zeros(n) sol_vec[init_sol[0][0]] = init_sol[1] sol_vec[init_sol[0][1]] = 1 - init_sol[1] if n < 3: # This is optimal for n=2, so return the solution return sol_vec, init_sol[2] iter_count = 0 grad_mat = np.zeros((n, n)) for i in range(n): for j in range(n): grad_mat[i, j] = dps[(i, j)] while iter_count < MinNormSolver.MAX_ITER: grad_dir = -1.0 * np.dot(grad_mat, sol_vec) new_point = MinNormSolver._next_point(sol_vec, grad_dir, n) # Re-compute the inner products for line search v1v1 = 0.0 v1v2 = 0.0 v2v2 = 0.0 for i in range(n): for j in range(n): v1v1 += sol_vec[i] * sol_vec[j] * dps[(i, j)] v1v2 += sol_vec[i] * new_point[j] * dps[(i, j)] v2v2 += new_point[i] * new_point[j] * dps[(i, j)] nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2) new_sol_vec = nc * sol_vec + (1 - nc) * new_point change = new_sol_vec - sol_vec if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT: return sol_vec, nd sol_vec = new_sol_vec def find_min_norm_element_FW(vecs): """ Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1. It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j}) Hence, we find the best 2-task solution, and then run the Frank Wolfe until convergence """ # Solution lying at the combination of two points dps = {} init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps) n = len(vecs) sol_vec = np.zeros(n) sol_vec[init_sol[0][0]] = init_sol[1] sol_vec[init_sol[0][1]] = 1 - init_sol[1] if n < 3: # This is optimal for n=2, so return the solution return sol_vec, init_sol[2] iter_count = 0 grad_mat = np.zeros((n, n)) for i in range(n): for j in range(n): grad_mat[i, j] = dps[(i, j)] while iter_count < MinNormSolver.MAX_ITER: t_iter = np.argmin(np.dot(grad_mat, sol_vec)) v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec)) v1v2 = np.dot(sol_vec, grad_mat[:, t_iter]) v2v2 = grad_mat[t_iter, t_iter] nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2) new_sol_vec = nc * sol_vec new_sol_vec[t_iter] += 1 - nc change = new_sol_vec - sol_vec if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT: return sol_vec, nd sol_vec = new_sol_vec def gradient_normalizers(grads, losses, normalization_type): gn = {} if normalization_type == 'l2': for t in grads: gn[t] = np.sqrt(np.sum([gr.pow(2).sum().data[0] for gr in grads[t]])) elif normalization_type == 'loss': for t in grads: gn[t] = losses[t] elif normalization_type == 'loss+': for t in grads: gn[t] = losses[t] * np.sqrt(np.sum([gr.pow(2).sum().data[0] for gr in grads[t]])) elif normalization_type == 'none': for t in grads: gn[t] = 1.0 else: print('ERROR: Invalid Normalization Type') return gn
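A tiny illustrative sanity check of the solver above: for two orthogonal task gradients, the minimum-norm point in the convex hull weights both tasks equally (the n < 3 branch returns the analytical two-vector solution).

import torch
from min_norm_solvers import MinNormSolver

g1 = torch.tensor([1.0, 0.0])
g2 = torch.tensor([0.0, 1.0])
sol, cost = MinNormSolver.find_min_norm_element([g1, g2])
print(sol, cost)    # ~[0.5 0.5] and 0.5 = ||0.5*g1 + 0.5*g2||_2^2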
7,364
36.01005
147
py
sdmgrad
sdmgrad-main/consistency/train.py
import numpy as np import torch import torch.utils.data from torch import linalg as LA from torch.autograd import Variable from model_lenet import RegressionModel, RegressionTrain from model_resnet import MnistResNet, RegressionTrainResNet from utils import * import pickle import argparse parser = argparse.ArgumentParser(description='Multi-Fashion-MNIST') parser.add_argument('--base', default='lenet', type=str, help='base model') parser.add_argument('--solver', default='sdmgrad', type=str, help='which optimization algorithm to use') parser.add_argument('--alpha', default=0.5, type=float, help='the alpha used in cagrad') parser.add_argument('--lmbda', default=0.5, type=float, help='the lmbda used in sdmgrad') parser.add_argument('--seed', default=0, type=int, help='the seed') parser.add_argument('--niter', default=100, type=int, help='step of (outer) iteration') parser.add_argument('--initer', default=20, type=int, help='step of inner itration') args = parser.parse_args() torch.manual_seed(args.seed) np.random.seed(args.seed) torch.cuda.manual_seed_all(args.seed) def train(dataset, base_model, solver, alpha, lmbda, niter, initer): # generate #npref preference vectors n_tasks = 2 device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') # load dataset # MultiMNIST: multi_mnist.pickle if dataset == 'mnist': with open('./data/multi_mnist.pickle', 'rb') as f: trainX, trainLabel, testX, testLabel = pickle.load(f) # MultiFashionMNIST: multi_fashion.pickle if dataset == 'fashion': with open('./data/multi_fashion.pickle', 'rb') as f: trainX, trainLabel, testX, testLabel = pickle.load(f) # Multi-(Fashion+MNIST): multi_fashion_and_mnist.pickle if dataset == 'fashion_and_mnist': with open('./data/multi_fashion_and_mnist.pickle', 'rb') as f: trainX, trainLabel, testX, testLabel = pickle.load(f) trainX = torch.from_numpy(trainX.reshape(120000, 1, 36, 36)).float() trainLabel = torch.from_numpy(trainLabel).long() testX = torch.from_numpy(testX.reshape(20000, 1, 36, 36)).float() testLabel = torch.from_numpy(testLabel).long() train_set = torch.utils.data.TensorDataset(trainX, trainLabel) test_set = torch.utils.data.TensorDataset(testX, testLabel) batch_size = 256 train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False) print('==>>> total trainning batch number: {}'.format(len(train_loader))) print('==>>> total testing batch number: {}'.format(len(test_loader))) # define the base model for ParetoMTL if base_model == 'lenet': model = RegressionModel(n_tasks).to(device) if base_model == 'resnet18': model = MnistResNet(n_tasks).to(device) # choose different optimizer for different base model if base_model == 'lenet': optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9) scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[15, 30, 45, 60, 75, 90], gamma=0.5) if base_model == 'resnet18': optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 20], gamma=0.1) # store infomation during optimization task_train_losses = [] train_accs = [] # grad grad_dims = [] for mm in model.shared_modules(): for param in mm.parameters(): grad_dims.append(param.data.numel()) grads = torch.Tensor(sum(grad_dims), n_tasks).to(device) w = torch.ones(n_tasks).to(device) / n_tasks # run niter epochs for t in range(niter): model.train() for it, (X, ts) in 
enumerate(train_loader): X, ts = X.to(device), ts.to(device) optimizer.zero_grad() # compute stochastic gradient task_loss = model.forward_loss(X, ts) # \nabla F, grads [n_model, n_tasks] for i in range(n_tasks): if i == 0: task_loss[i].backward(retain_graph=True) else: task_loss[i].backward() grad2vec(model, grads, grad_dims, i) model.zero_grad_shared_modules() if solver == 'cagrad': g = cagrad(grads, alpha, rescale=1) elif solver == 'mgd': g = mgd(grads) elif solver == 'sgd': g = mean_grad(grads) elif solver == 'sdmgrad': g = sdmgrad(w, grads, lmbda, initer) else: raise ValueError('Not supported solver.') overwrite_grad(model, g, grad_dims) # optimization step optimizer.step() scheduler.step() # calculate and record performance if t == 0 or (t + 1) % 2 == 0: model.eval() with torch.no_grad(): total_train_loss = [] train_acc = [] correct1_train = 0 correct2_train = 0 for it, (X, ts) in enumerate(train_loader): X, ts = X.to(device), ts.to(device) valid_train_loss = model.forward_loss(X, ts) total_train_loss.append(valid_train_loss) output1 = model(X).max(2, keepdim=True)[1][:, 0] output2 = model(X).max(2, keepdim=True)[1][:, 1] correct1_train += output1.eq(ts[:, 0].view_as(output1)).sum().item() correct2_train += output2.eq(ts[:, 1].view_as(output2)).sum().item() train_acc = np.stack([ 1.0 * correct1_train / len(train_loader.dataset), 1.0 * correct2_train / len(train_loader.dataset) ]) total_train_loss = torch.stack(total_train_loss) average_train_loss = torch.mean(total_train_loss, dim=0) # record and print task_train_losses.append(average_train_loss.data.cpu().numpy()) train_accs.append(train_acc) print('{}/{}: train_loss={}, train_acc={}'.format(t + 1, niter, task_train_losses[-1], train_accs[-1])) save_path = './saved_model/%s_%s_solver_%s_niter_%d_seed_%d.pickle' % (dataset, base_model, solver, niter, args.seed) torch.save(model.state_dict(), save_path) def run(dataset='mnist', base_model='lenet', solver='sdmgrad', alpha=0.5, lmbda=0.5, niter=100, initer=20): """ run stochatic moo algorithms """ train(dataset, base_model, solver, alpha, lmbda, niter, initer) run(dataset='fashion_and_mnist', base_model=args.base, solver=args.solver, alpha=args.alpha, lmbda=args.lmbda, niter=args.niter, initer=args.initer)
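The loop above wires grad2vec and overwrite_grad around the chosen solver; the following toy sketch (illustrative names, two quadratic losses on a single Linear layer) shows that gradient-combination pattern in isolation.

import torch

model = torch.nn.Linear(4, 1)
losses = [model(torch.randn(8, 4)).pow(2).mean() for _ in range(2)]

grads = []
for loss in losses:
    model.zero_grad()
    loss.backward()
    grads.append(torch.cat([p.grad.flatten() for p in model.parameters()]))
grads = torch.stack(grads, dim=1)     # [n_params, n_tasks], as in grad2vec

g = grads.mean(dim=1)                 # the 'sgd' choice; cagrad/mgd/sdmgrad plug in here
offset = 0                            # write the merged gradient back, as in overwrite_grad
for p in model.parameters():
    p.grad = g[offset:offset + p.numel()].view_as(p).clone()
    offset += p.numel()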
7,010
36.292553
118
py
sdmgrad
sdmgrad-main/cityscapes/model_segnet_single.py
import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import argparse from create_dataset import * from utils import * parser = argparse.ArgumentParser(description='Single-task: One Task') parser.add_argument('--task', default='semantic', type=str, help='choose task: semantic, depth') parser.add_argument('--dataroot', default='cityscapes', type=str, help='dataset root') parser.add_argument('--seed', default=0, type=int, help='control seed') parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2') opt = parser.parse_args() class SegNet(nn.Module): def __init__(self): super(SegNet, self).__init__() # initialise network parameters filter = [64, 128, 256, 512, 512] self.class_nb = 7 # define encoder decoder layers self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])]) self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]])) self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]])) # define convolution layer self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): if i == 0: self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]])) else: self.conv_block_enc.append( nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]), self.conv_layer([filter[i + 1], filter[i + 1]]))) self.conv_block_dec.append( nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]]))) if opt.task == 'semantic': self.pred_task = self.conv_layer([filter[0], self.class_nb], pred=True) if opt.task == 'depth': self.pred_task = self.conv_layer([filter[0], 1], pred=True) # define pooling and unpooling functions self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True) self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def conv_layer(self, channel, pred=False): if not pred: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), ) return conv_block def forward(self, x): g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5)) for i in range(5): g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2)) # define global shared network for i in range(5): if i == 0: g_encoder[i][0] = self.encoder_block[i](x) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) else: g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1]) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) for i in range(5): if i == 0: g_upsampl[i] = self.up_sampling(g_maxpool[-1], 
indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) else: g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) # define task prediction layers if opt.task == 'semantic': pred = F.log_softmax(self.pred_task(g_decoder[-1][-1]), dim=1) if opt.task == 'depth': pred = self.pred_task(g_decoder[-1][-1]) return pred control_seed(opt.seed) # define model, optimiser and scheduler device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") SegNet = SegNet().to(device) optimizer = optim.Adam(SegNet.parameters(), lr=1e-4) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5) print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet), count_parameters(SegNet) / 24981069)) print( 'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30') # define dataset dataset_path = opt.dataroot if opt.apply_augmentation: train_set = CityScapes(root=dataset_path, train=True, augmentation=True) print('Applying data augmentation.') else: train_set = CityScapes(root=dataset_path, train=True) print('Standard training strategy without data augmentation.') test_set = CityScapes(root=dataset_path, train=False) batch_size = 8 train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False) # Train and evaluate single-task network single_task_trainer(train_loader, test_loader, SegNet, device, optimizer, scheduler, opt, 200)
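Illustrative check of how the semantic head above is consumed downstream: the log_softmax output feeds F.nll_loss with ignore_index=-1 (see model_fit in utils.py), with class_nb = 7 CityScapes classes and -1 marking undefined pixels. The random tensors are stand-ins.

import torch
import torch.nn.functional as F

pred = F.log_softmax(torch.randn(2, 7, 8, 8), dim=1)   # [batch, class_nb, H, W]
label = torch.randint(-1, 7, (2, 8, 8))                # -1 = undefined pixel
print(F.nll_loss(pred, label, ignore_index=-1))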
6,370
43.552448
120
py
sdmgrad
sdmgrad-main/cityscapes/evaluate.py
import matplotlib.pyplot as plt import seaborn as sns import numpy as np import torch methods = [ "sdmgrad-1e-1", "sdmgrad-2e-1", "sdmgrad-3e-1", "sdmgrad-4e-1", "sdmgrad-5e-1", "sdmgrad-6e-1", "sdmgrad-7e-1", "sdmgrad-8e-1", "sdmgrad-9e-1", "sdmgrad-1e0" ] colors = ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "tab:green", "tab:cyan", "tab:blue", "tab:red"] stats = ["semantic loss", "mean iou", "pix acc", "depth loss", "abs err", "rel err"] stats_idx_map = [4, 5, 6, 8, 9, 10] delta_stats = ["mean iou", "pix acc", "abs err", "rel err"] time_idx = 22 # change random seeds used in the experiments here seeds = [0, 1, 2] logs = {} min_epoch = 100000 for m in methods: logs[m] = {"train": [None for _ in range(3)], "test": [None for _ in range(3)]} for seed in seeds: logs[m]["train"][seed] = {} logs[m]["test"][seed] = {} for stat in stats: for seed in seeds: logs[m]["train"][seed][stat] = [] logs[m]["test"][seed][stat] = [] for seed in seeds: logs[m]["train"][seed]["time"] = [] for seed in seeds: fname = f"logs/{m}-sd{seed}.log" with open(fname, "r") as f: lines = f.readlines() for line in lines: if line.startswith("Epoch"): ws = line.split(" ") for i, stat in enumerate(stats): logs[m]["train"][seed][stat].append(float(ws[stats_idx_map[i]])) logs[m]["test"][seed][stat].append(float(ws[stats_idx_map[i] + 9])) logs[m]["train"][seed]["time"].append(float(ws[time_idx])) n_epoch = min(len(logs[m]["train"][seed]["semantic loss"]), len(logs[m]["test"][seed]["semantic loss"])) if n_epoch < min_epoch: min_epoch = n_epoch print(m, n_epoch) test_stats = {} train_stats = {} learning_time = {} print(" " * 25 + " | ".join([f"{s:5s}" for s in stats])) for mi, mode in enumerate(["train", "test"]): if mi == 1: print(mode) for mmi, m in enumerate(methods): if m not in test_stats: test_stats[m] = {} train_stats[m] = {} string = f"{m:30s} " for stat in stats: x = [] for seed in seeds: x.append(np.array(logs[m][mode][seed][stat][min_epoch - 10:min_epoch]).mean()) x = np.array(x) if mode == "test": test_stats[m][stat] = x.copy() else: train_stats[m][stat] = x.copy() mu = x.mean() std = x.std() / np.sqrt(3) string += f" | {mu:5.4f}" if mode == "test": print(string) for m in methods: learning_time[m] = np.array([np.array(logs[m]["train"][sd]["time"]).mean() for sd in seeds]) for method in methods: average_loss = np.mean([train_stats[method]["semantic loss"].mean(), train_stats[method]["depth loss"].mean()]) print(f"{method} average training loss {average_loss}") base = np.array([0.7401, 0.9316, 0.0125, 27.77]) sign = np.array([1, 1, 0, 0]) kk = np.ones(4) * -1 def delta_fn(a): return (kk**sign * (a - base) / base).mean() * 100. # *100 for percentage deltas = {} for method in methods: tmp = np.zeros(4) for i, stat in enumerate(delta_stats): tmp[i] = test_stats[method][stat].mean() deltas[method] = delta_fn(tmp) print(f"{method:30s} delta: {deltas[method]:4.3f}")
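A worked example (made-up numbers) of the delta metric defined above: the sign vector marks which entries of base (mIoU, pix acc, abs err, rel err) are higher-is-better, and positive delta values mean a method is on average worse than the single-task baseline.

import numpy as np

base = np.array([0.7401, 0.9316, 0.0125, 27.77])
sign = np.array([1, 1, 0, 0])     # 1 = higher is better
kk = np.ones(4) * -1

def delta_fn(a):
    return (kk**sign * (a - base) / base).mean() * 100.

a = np.array([0.75, 0.93, 0.0120, 27.0])
print(delta_fn(a))    # approx -2.0, i.e. ~2% better than the baseline on average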
3,545
30.380531
117
py
sdmgrad
sdmgrad-main/cityscapes/model_segnet_stan.py
import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import argparse from create_dataset import * from utils import * parser = argparse.ArgumentParser(description='Single-task: Attention Network') parser.add_argument('--task', default='semantic', type=str, help='choose task: semantic, depth, normal') parser.add_argument('--dataroot', default='cityscapes', type=str, help='dataset root') parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2') opt = parser.parse_args() class SegNet(nn.Module): def __init__(self): super(SegNet, self).__init__() # initialise network parameters filter = [64, 128, 256, 512, 512] self.class_nb = 7 # define encoder decoder layers self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])]) self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]])) self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]])) # define convolution layer self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): if i == 0: self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]])) else: self.conv_block_enc.append( nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]), self.conv_layer([filter[i + 1], filter[i + 1]]))) self.conv_block_dec.append( nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]]))) # define task attention layers self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])]) self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])]) self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])]) self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for j in range(2): if j < 1: self.encoder_att.append(nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])) self.decoder_att.append(nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])) for i in range(4): self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]])) self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]])) for i in range(4): if i < 3: self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]])) self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]])) else: self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.pred_task1 = self.conv_layer([filter[0], self.class_nb], pred=True) self.pred_task2 = self.conv_layer([filter[0], 1], pred=True) #self.pred_task3 = self.conv_layer([filter[0], 3], pred=True) # define pooling and unpooling functions self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True) self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2) self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5])) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): 
nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def conv_layer(self, channel, pred=False): if not pred: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), ) return conv_block def att_layer(self, channel): att_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), nn.BatchNorm2d(channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0), nn.BatchNorm2d(channel[2]), nn.Sigmoid(), ) return att_block def forward(self, x): g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5)) for i in range(5): g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2)) # define attention list for tasks atten_encoder, atten_decoder = ([0] * 2 for _ in range(2)) for i in range(2): atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2)) for i in range(2): for j in range(5): atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2)) # define global shared network for i in range(5): if i == 0: g_encoder[i][0] = self.encoder_block[i](x) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) else: g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1]) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) for i in range(5): if i == 0: g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) else: g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) # define task dependent attention module for i in range(2): for j in range(5): if j == 0: atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0]) atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1] atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1]) atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2) else: atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat( (g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1)) atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1] atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1]) atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2) for j in range(5): if j == 0: atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1], scale_factor=2, mode='bilinear', align_corners=True) atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0]) atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat( (g_upsampl[j], atten_decoder[i][j][0]), dim=1)) atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1] else: atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2], scale_factor=2, mode='bilinear', align_corners=True) atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0]) atten_decoder[i][j][1] = 
self.decoder_att[i][-j - 1](torch.cat( (g_upsampl[j], atten_decoder[i][j][0]), dim=1)) atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1] # define task prediction layers t1_pred = F.log_softmax(self.pred_task1(atten_decoder[0][-1][-1]), dim=1) t2_pred = self.pred_task2(atten_decoder[1][-1][-1]) #t3_pred = self.pred_task3(atten_decoder[2][-1][-1]) #t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True) return [t1_pred, t2_pred], self.logsigma # define model, optimiser and scheduler device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") SegNet_STAN = SegNet().to(device) optimizer = optim.Adam(SegNet_STAN.parameters(), lr=1e-4) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5) print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_STAN), count_parameters(SegNet_STAN) / 24981069)) print( 'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30') # define dataset dataset_path = opt.dataroot if opt.apply_augmentation: train_set = CityScapes(root=dataset_path, train=True, augmentation=True) print('Applying data augmentation.') else: train_set = CityScapes(root=dataset_path, train=True) print('Standard training strategy without data augmentation.') test_set = CityScapes(root=dataset_path, train=False) batch_size = 8 train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False) # Train and evaluate single-task network single_task_trainer(train_loader, test_loader, SegNet_STAN, device, optimizer, scheduler, opt, 200)
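An illustrative view of the attention gating performed by att_layer above: the block ends in a Sigmoid, and its output is multiplied element-wise with the shared feature map to form task-specific features (the tensors below are stand-ins).

import torch

shared = torch.randn(1, 64, 16, 16)                 # a shared encoder feature map
mask = torch.sigmoid(torch.randn(1, 64, 16, 16))    # what att_layer produces, values in (0, 1)
gated = mask * shared                               # task-specific features
print(gated.shape)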
11,156
49.713636
119
py
sdmgrad
sdmgrad-main/cityscapes/utils.py
import torch import torch.nn.functional as F import numpy as np import random import time from copy import deepcopy from min_norm_solvers import MinNormSolver from scipy.optimize import minimize, Bounds, minimize_scalar def euclidean_proj_simplex(v, s=1): """ Compute the Euclidean projection on a positive simplex Solves the optimisation problem (using the algorithm from [1]): min_w 0.5 * || w - v ||_2^2 , s.t. \sum_i w_i = s, w_i >= 0 Parameters ---------- v: (n,) numpy array, n-dimensional vector to project s: int, optional, default: 1, radius of the simplex Returns ------- w: (n,) numpy array, Euclidean projection of v on the simplex Notes ----- The complexity of this algorithm is in O(n log(n)) as it involves sorting v. Better alternatives exist for high-dimensional sparse vectors (cf. [1]) However, this implementation still easily scales to millions of dimensions. References ---------- [1] Efficient Projections onto the .1-Ball for Learning in High Dimensions John Duchi, Shai Shalev-Shwartz, Yoram Singer, and Tushar Chandra. International Conference on Machine Learning (ICML 2008) http://www.cs.berkeley.edu/~jduchi/projects/DuchiSiShCh08.pdf [2] Projection onto the probability simplex: An efficient algorithm with a simple proof, and an application Weiran Wang, Miguel Á. Carreira-Perpiñán. arXiv:1309.1541 https://arxiv.org/pdf/1309.1541.pdf [3] https://gist.github.com/daien/1272551/edd95a6154106f8e28209a1c7964623ef8397246#file-simplex_projection-py """ assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s v = v.astype(np.float64) n, = v.shape # will raise ValueError if v is not 1-D # check if we are already on the simplex if v.sum() == s and np.alltrue(v >= 0): # best projection: itself! return v # get the array of cumulative sums of a sorted (decreasing) copy of v u = np.sort(v)[::-1] cssv = np.cumsum(u) # get the number of > 0 components of the optimal solution rho = np.nonzero(u * np.arange(1, n + 1) > (cssv - s))[0][-1] # compute the Lagrange multiplier associated to the simplex constraint theta = float(cssv[rho] - s) / (rho + 1) # compute the projection by thresholding v using theta w = (v - theta).clip(min=0) return w """ Define task metrics, loss functions and model trainer here. """ def control_seed(seed): torch.backends.cudnn.enabled = False torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.cuda.manual_seed_all(seed) def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def model_fit(x_pred, x_output, task_type): device = x_pred.device # binary mark to mask out undefined pixel space binary_mask = (torch.sum(x_output, dim=1) != 0).float().unsqueeze(1).to(device) if task_type == 'semantic': # semantic loss: depth-wise cross entropy loss = F.nll_loss(x_pred, x_output, ignore_index=-1) if task_type == 'depth': # depth loss: l1 norm loss = torch.sum(torch.abs(x_pred - x_output) * binary_mask) / torch.nonzero(binary_mask, as_tuple=False).size(0) if task_type == 'normal': # normal loss: dot product loss = 1 - torch.sum((x_pred * x_output) * binary_mask) / torch.nonzero(binary_mask, as_tuple=False).size(0) return loss # Legacy: compute mIoU and Acc. for each image and average across all images. 
# def compute_miou(x_pred, x_output): # _, x_pred_label = torch.max(x_pred, dim=1) # x_output_label = x_output # batch_size = x_pred.size(0) # class_nb = x_pred.size(1) # device = x_pred.device # for i in range(batch_size): # true_class = 0 # first_switch = True # invalid_mask = (x_output[i] >= 0).float() # for j in range(class_nb): # pred_mask = torch.eq(x_pred_label[i], j * torch.ones(x_pred_label[i].shape).long().to(device)) # true_mask = torch.eq(x_output_label[i], j * torch.ones(x_output_label[i].shape).long().to(device)) # mask_comb = pred_mask.float() + true_mask.float() # union = torch.sum((mask_comb > 0).float() * invalid_mask) # remove non-defined pixel predictions # intsec = torch.sum((mask_comb > 1).float()) # if union == 0: # continue # if first_switch: # class_prob = intsec / union # first_switch = False # else: # class_prob = intsec / union + class_prob # true_class += 1 # if i == 0: # batch_avg = class_prob / true_class # else: # batch_avg = class_prob / true_class + batch_avg # return batch_avg / batch_size # def compute_iou(x_pred, x_output): # _, x_pred_label = torch.max(x_pred, dim=1) # x_output_label = x_output # batch_size = x_pred.size(0) # for i in range(batch_size): # if i == 0: # pixel_acc = torch.div( # torch.sum(torch.eq(x_pred_label[i], x_output_label[i]).float()), # torch.sum((x_output_label[i] >= 0).float())) # else: # pixel_acc = pixel_acc + torch.div( # torch.sum(torch.eq(x_pred_label[i], x_output_label[i]).float()), # torch.sum((x_output_label[i] >= 0).float())) # return pixel_acc / batch_size # New mIoU and Acc. formula: accumulate every pixel and average across all pixels in all images class ConfMatrix(object): def __init__(self, num_classes): self.num_classes = num_classes self.mat = None def update(self, pred, target): n = self.num_classes if self.mat is None: self.mat = torch.zeros((n, n), dtype=torch.int64, device=pred.device) with torch.no_grad(): k = (target >= 0) & (target < n) inds = n * target[k].to(torch.int64) + pred[k] self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n) def get_metrics(self): h = self.mat.float() acc = torch.diag(h).sum() / h.sum() iu = torch.diag(h) / (h.sum(1) + h.sum(0) - torch.diag(h)) return torch.mean(iu).item(), acc.item() def depth_error(x_pred, x_output): device = x_pred.device binary_mask = (torch.sum(x_output, dim=1) != 0).unsqueeze(1).to(device) x_pred_true = x_pred.masked_select(binary_mask) x_output_true = x_output.masked_select(binary_mask) abs_err = torch.abs(x_pred_true - x_output_true) rel_err = torch.abs(x_pred_true - x_output_true) / x_output_true return (torch.sum(abs_err) / torch.nonzero(binary_mask, as_tuple=False).size(0)).item(), \ (torch.sum(rel_err) / torch.nonzero(binary_mask, as_tuple=False).size(0)).item() def normal_error(x_pred, x_output): binary_mask = (torch.sum(x_output, dim=1) != 0) error = torch.acos(torch.clamp(torch.sum(x_pred * x_output, 1).masked_select(binary_mask), -1, 1)).detach().cpu().numpy() error = np.degrees(error) return np.mean(error), np.median(error), np.mean(error < 11.25), np.mean(error < 22.5), np.mean(error < 30) """ =========== Universal Multi-task Trainer =========== """ def multi_task_trainer(train_loader, test_loader, multi_task_model, device, optimizer, scheduler, opt, total_epoch=200): train_batch = len(train_loader) test_batch = len(test_loader) T = opt.temp avg_cost = np.zeros([total_epoch, 12], dtype=np.float32) lambda_weight = np.ones([2, total_epoch]) for index in range(total_epoch): t0 = time.time() cost = np.zeros(12, dtype=np.float32) # apply 
Dynamic Weight Average if opt.weight == 'dwa': if index == 0 or index == 1: lambda_weight[:, index] = 1.0 else: w_1 = avg_cost[index - 1, 0] / avg_cost[index - 2, 0] w_2 = avg_cost[index - 1, 3] / avg_cost[index - 2, 3] lambda_weight[0, index] = 2 * np.exp(w_1 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T)) lambda_weight[1, index] = 2 * np.exp(w_2 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T)) # iteration for all batches multi_task_model.train() train_dataset = iter(train_loader) conf_mat = ConfMatrix(multi_task_model.class_nb) for k in range(train_batch): train_data, train_label, train_depth = train_dataset.next() train_data, train_label = train_data.to(device), train_label.long().to(device) train_depth = train_depth.to(device) train_pred, logsigma = multi_task_model(train_data) optimizer.zero_grad() train_loss = [ model_fit(train_pred[0], train_label, 'semantic'), model_fit(train_pred[1], train_depth, 'depth') ] if opt.weight == 'equal' or opt.weight == 'dwa': loss = sum([lambda_weight[i, index] * train_loss[i] for i in range(2)]) else: loss = sum(1 / (2 * torch.exp(logsigma[i])) * train_loss[i] + logsigma[i] / 2 for i in range(2)) loss.backward() optimizer.step() # accumulate label prediction for every pixel in training images conf_mat.update(train_pred[0].argmax(1).flatten(), train_label.flatten()) cost[0] = train_loss[0].item() cost[3] = train_loss[1].item() cost[4], cost[5] = depth_error(train_pred[1], train_depth) avg_cost[index, :6] += cost[:6] / train_batch # compute mIoU and acc avg_cost[index, 1:3] = conf_mat.get_metrics() # evaluating test data multi_task_model.eval() conf_mat = ConfMatrix(multi_task_model.class_nb) with torch.no_grad(): # operations inside don't track history test_dataset = iter(test_loader) for k in range(test_batch): test_data, test_label, test_depth = test_dataset.next() test_data, test_label = test_data.to(device), test_label.long().to(device) test_depth = test_depth.to(device) test_pred, _ = multi_task_model(test_data) test_loss = [ model_fit(test_pred[0], test_label, 'semantic'), model_fit(test_pred[1], test_depth, 'depth') ] conf_mat.update(test_pred[0].argmax(1).flatten(), test_label.flatten()) cost[6] = test_loss[0].item() cost[9] = test_loss[1].item() cost[10], cost[11] = depth_error(test_pred[1], test_depth) avg_cost[index, 6:] += cost[6:] / test_batch # compute mIoU and acc avg_cost[index, 7:9] = conf_mat.get_metrics() scheduler.step() t1 = time.time() print( 'Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} || TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | TIME: {:.4f}' .format(index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 3], avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8], avg_cost[index, 9], avg_cost[index, 10], avg_cost[index, 11], t1 - t0)) """ =========== Universal Single-task Trainer =========== """ def single_task_trainer(train_loader, test_loader, single_task_model, device, optimizer, scheduler, opt, total_epoch=200): train_batch = len(train_loader) test_batch = len(test_loader) avg_cost = np.zeros([total_epoch, 12], dtype=np.float32) for index in range(total_epoch): cost = np.zeros(12, dtype=np.float32) # iteration for all batches single_task_model.train() train_dataset = iter(train_loader) conf_mat = ConfMatrix(single_task_model.class_nb) for k in range(train_batch): train_data, train_label, train_depth = train_dataset.next() train_data, train_label = train_data.to(device), train_label.long().to(device) train_depth = 
train_depth.to(device) train_pred = single_task_model(train_data) optimizer.zero_grad() if opt.task == 'semantic': train_loss = model_fit(train_pred, train_label, opt.task) train_loss.backward() optimizer.step() conf_mat.update(train_pred.argmax(1).flatten(), train_label.flatten()) cost[0] = train_loss.item() if opt.task == 'depth': train_loss = model_fit(train_pred, train_depth, opt.task) train_loss.backward() optimizer.step() cost[3] = train_loss.item() cost[4], cost[5] = depth_error(train_pred, train_depth) avg_cost[index, :6] += cost[:6] / train_batch if opt.task == 'semantic': avg_cost[index, 1:3] = conf_mat.get_metrics() # evaluating test data single_task_model.eval() conf_mat = ConfMatrix(single_task_model.class_nb) with torch.no_grad(): # operations inside don't track history test_dataset = iter(test_loader) for k in range(test_batch): test_data, test_label, test_depth = test_dataset.next() test_data, test_label = test_data.to(device), test_label.long().to(device) test_depth = test_depth.to(device) test_pred = single_task_model(test_data) if opt.task == 'semantic': test_loss = model_fit(test_pred, test_label, opt.task) conf_mat.update(test_pred.argmax(1).flatten(), test_label.flatten()) cost[6] = test_loss.item() if opt.task == 'depth': test_loss = model_fit(test_pred, test_depth, opt.task) cost[9] = test_loss.item() cost[10], cost[11] = depth_error(test_pred, test_depth) avg_cost[index, 6:] += cost[6:] / test_batch if opt.task == 'semantic': avg_cost[index, 7:9] = conf_mat.get_metrics() scheduler.step() if opt.task == 'semantic': print('Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} TEST: {:.4f} {:.4f} {:.4f}'.format( index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8])) if opt.task == 'depth': print('Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} TEST: {:.4f} {:.4f} {:.4f}'.format( index, avg_cost[index, 3], avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 9], avg_cost[index, 10], avg_cost[index, 11])) torch.save(single_task_model.state_dict(), f"models/single-{opt.task}-{opt.seed}.pt") """ =========== Universal Gradient Manipulation Multi-task Trainer =========== """ def multi_task_rg_trainer(train_loader, test_loader, multi_task_model, device, optimizer, scheduler, opt, total_epoch=200): method = opt.method alpha = opt.alpha niter = opt.niter # warm_niter = opt.warm_niter def graddrop(grads): P = 0.5 * (1. 
+ grads.sum(1) / (grads.abs().sum(1) + 1e-8)) U = torch.rand_like(grads[:, 0]) M = P.gt(U).view(-1, 1) * grads.gt(0) + P.lt(U).view(-1, 1) * grads.lt(0) g = (grads * M.float()).mean(1) return g def mgd(grads): grads_cpu = grads.t().cpu() sol, min_norm = MinNormSolver.find_min_norm_element([grads_cpu[t] for t in range(grads.shape[-1])]) w = torch.FloatTensor(sol).to(grads.device) g = grads.mm(w.view(-1, 1)).view(-1) return g def pcgrad(grads, rng): grad_vec = grads.t() num_tasks = 2 shuffled_task_indices = np.zeros((num_tasks, num_tasks - 1), dtype=int) for i in range(num_tasks): task_indices = np.arange(num_tasks) task_indices[i] = task_indices[-1] shuffled_task_indices[i] = task_indices[:-1] rng.shuffle(shuffled_task_indices[i]) shuffled_task_indices = shuffled_task_indices.T normalized_grad_vec = grad_vec / (grad_vec.norm(dim=1, keepdim=True) + 1e-8) # num_tasks x dim modified_grad_vec = deepcopy(grad_vec) for task_indices in shuffled_task_indices: normalized_shuffled_grad = normalized_grad_vec[task_indices] # num_tasks x dim dot = (modified_grad_vec * normalized_shuffled_grad).sum(dim=1, keepdim=True) # num_tasks x dim modified_grad_vec -= torch.clamp_max(dot, 0) * normalized_shuffled_grad g = modified_grad_vec.mean(dim=0) return g def cagrad(grads, alpha=0.5, rescale=0): g1 = grads[:, 0] g2 = grads[:, 1] g11 = g1.dot(g1).item() g12 = g1.dot(g2).item() g22 = g2.dot(g2).item() g0_norm = 0.5 * np.sqrt(g11 + g22 + 2 * g12) # want to minimize g_w^Tg_0 + c*||g_0||*||g_w|| coef = alpha * g0_norm def obj(x): # g_w^T g_0: x*0.5*(g11+g22-2g12)+(0.5+x)*(g12-g22)+g22 # g_w^T g_w: x^2*(g11+g22-2g12)+2*x*(g12-g22)+g22 return coef * np.sqrt(x**2 * (g11 + g22 - 2 * g12) + 2 * x * (g12 - g22) + g22 + 1e-8) + 0.5 * x * (g11 + g22 - 2 * g12) + (0.5 + x) * (g12 - g22) + g22 res = minimize_scalar(obj, bounds=(0, 1), method='bounded') x = res.x gw_norm = np.sqrt(x**2 * g11 + (1 - x)**2 * g22 + 2 * x * (1 - x) * g12 + 1e-8) lmbda = coef / (gw_norm + 1e-8) g = (0.5 + lmbda * x) * g1 + (0.5 + lmbda * (1 - x)) * g2 # g0 + lmbda*gw if rescale == 0: return g elif rescale == 1: return g / (1 + alpha**2) else: return g / (1 + alpha) def sdmgrad(w, grads, alpha, niter=20): GG = torch.mm(grads.t(), grads) scale = torch.mean(torch.sqrt(torch.diag(GG) + 1e-4)) GG = GG / scale.pow(2) Gg = torch.mean(GG, dim=1) gg = torch.mean(Gg) w.requires_grad = True optimizer = torch.optim.SGD([w], lr=10, momentum=0.5) for i in range(niter): optimizer.zero_grad() obj = torch.dot(w, torch.mv(GG, w)) + 2 * alpha * torch.dot(w, Gg) + alpha**2 * gg obj.backward() optimizer.step() proj = euclidean_proj_simplex(w.data.cpu().numpy()) w.data.copy_(torch.from_numpy(proj).data) w.requires_grad = False g0 = torch.mean(grads, dim=1) gw = torch.mv(grads, w) g = (gw + alpha * g0) / (1 + alpha) return g def grad2vec(m, grads, grad_dims, task): # store the gradients grads[:, task].fill_(0.0) cnt = 0 for mm in m.shared_modules(): for p in mm.parameters(): grad = p.grad if grad is not None: grad_cur = grad.data.detach().clone() beg = 0 if cnt == 0 else sum(grad_dims[:cnt]) en = sum(grad_dims[:cnt + 1]) grads[beg:en, task].copy_(grad_cur.data.view(-1)) cnt += 1 def overwrite_grad(m, newgrad, grad_dims): newgrad = newgrad * 2 # to match the sum loss cnt = 0 for mm in m.shared_modules(): for param in mm.parameters(): beg = 0 if cnt == 0 else sum(grad_dims[:cnt]) en = sum(grad_dims[:cnt + 1]) this_grad = newgrad[beg:en].contiguous().view(param.data.size()) param.grad = this_grad.data.clone() cnt += 1 rng = np.random.default_rng() grad_dims = [] for mm in 
multi_task_model.shared_modules(): for param in mm.parameters(): grad_dims.append(param.data.numel()) grads = torch.Tensor(sum(grad_dims), 2).cuda() w = 1 / 2 * torch.ones(2).cuda() train_batch = len(train_loader) test_batch = len(test_loader) T = opt.temp avg_cost = np.zeros([total_epoch, 12], dtype=np.float32) lambda_weight = np.ones([2, total_epoch]) for index in range(total_epoch): t0 = time.time() cost = np.zeros(12, dtype=np.float32) # apply Dynamic Weight Average if opt.weight == 'dwa': if index == 0 or index == 1: lambda_weight[:, index] = 1.0 else: w_1 = avg_cost[index - 1, 0] / avg_cost[index - 2, 0] w_2 = avg_cost[index - 1, 3] / avg_cost[index - 2, 3] lambda_weight[0, index] = 2 * np.exp(w_1 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T)) lambda_weight[1, index] = 2 * np.exp(w_2 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T)) # iteration for all batches multi_task_model.train() train_dataset = iter(train_loader) conf_mat = ConfMatrix(multi_task_model.class_nb) for k in range(train_batch): train_data, train_label, train_depth = train_dataset.next() train_data, train_label = train_data.to(device), train_label.long().to(device) train_depth = train_depth.to(device) train_pred, logsigma = multi_task_model(train_data) train_loss = [ model_fit(train_pred[0], train_label, 'semantic'), model_fit(train_pred[1], train_depth, 'depth') ] train_loss_tmp = [0, 0] if opt.weight == 'equal' or opt.weight == 'dwa': for i in range(2): train_loss_tmp[i] = train_loss[i] * lambda_weight[i, index] else: for i in range(2): train_loss_tmp[i] = 1 / (2 * torch.exp(logsigma[i])) * train_loss[i] + logsigma[i] / 2 optimizer.zero_grad() if method == "graddrop": for i in range(2): if i == 0: train_loss_tmp[i].backward(retain_graph=True) else: train_loss_tmp[i].backward() grad2vec(multi_task_model, grads, grad_dims, i) multi_task_model.zero_grad_shared_modules() g = graddrop(grads) overwrite_grad(multi_task_model, g, grad_dims) optimizer.step() elif method == "pcgrad": for i in range(2): if i == 0: train_loss_tmp[i].backward(retain_graph=True) else: train_loss_tmp[i].backward() grad2vec(multi_task_model, grads, grad_dims, i) multi_task_model.zero_grad_shared_modules() g = pcgrad(grads, rng) overwrite_grad(multi_task_model, g, grad_dims) optimizer.step() elif method == "mgd": for i in range(2): if i == 0: train_loss_tmp[i].backward(retain_graph=True) else: train_loss_tmp[i].backward() grad2vec(multi_task_model, grads, grad_dims, i) multi_task_model.zero_grad_shared_modules() g = mgd(grads) overwrite_grad(multi_task_model, g, grad_dims) optimizer.step() elif method == "cagrad": for i in range(2): if i == 0: train_loss_tmp[i].backward(retain_graph=True) else: train_loss_tmp[i].backward() grad2vec(multi_task_model, grads, grad_dims, i) multi_task_model.zero_grad_shared_modules() g = cagrad(grads, alpha, rescale=1) overwrite_grad(multi_task_model, g, grad_dims) optimizer.step() elif method == "sdmgrad": for i in range(2): if i == 0: train_loss_tmp[i].backward(retain_graph=True) else: train_loss_tmp[i].backward() grad2vec(multi_task_model, grads, grad_dims, i) multi_task_model.zero_grad_shared_modules() g = sdmgrad(w, grads, alpha, niter=niter) overwrite_grad(multi_task_model, g, grad_dims) optimizer.step() # accumulate label prediction for every pixel in training images conf_mat.update(train_pred[0].argmax(1).flatten(), train_label.flatten()) cost[0] = train_loss[0].item() cost[3] = train_loss[1].item() cost[4], cost[5] = depth_error(train_pred[1], train_depth) avg_cost[index, :6] += cost[:6] / train_batch # compute mIoU and 
acc avg_cost[index, 1:3] = conf_mat.get_metrics() # evaluating test data multi_task_model.eval() conf_mat = ConfMatrix(multi_task_model.class_nb) with torch.no_grad(): # operations inside don't track history test_dataset = iter(test_loader) for k in range(test_batch): test_data, test_label, test_depth = test_dataset.next() test_data, test_label = test_data.to(device), test_label.long().to(device) test_depth = test_depth.to(device) test_pred, _ = multi_task_model(test_data) test_loss = [ model_fit(test_pred[0], test_label, 'semantic'), model_fit(test_pred[1], test_depth, 'depth') ] conf_mat.update(test_pred[0].argmax(1).flatten(), test_label.flatten()) cost[6] = test_loss[0].item() cost[9] = test_loss[1].item() cost[10], cost[11] = depth_error(test_pred[1], test_depth) avg_cost[index, 6:] += cost[6:] / test_batch # compute mIoU and acc avg_cost[index, 7:9] = conf_mat.get_metrics() scheduler.step() t1 = time.time() print( 'Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} || TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | TIME: {:.4f}' .format(index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 3], avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8], avg_cost[index, 9], avg_cost[index, 10], avg_cost[index, 11], t1 - t0)) torch.save(multi_task_model.state_dict(), f"models/{method}-{opt.weight}-{alpha}-{opt.seed}.pt")
27,394
40.25753
148
py
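A minimal, self-contained sketch of the Dynamic Weight Average (DWA) rule used by the multi-task trainers above, assuming two tasks and the same temperature-softmax form; the loss history below is made up purely for illustration.

import numpy as np

def dwa_weights(avg_cost, index, T=2.0):
    # avg_cost[:, 0] holds the semantic loss and avg_cost[:, 3] the depth loss,
    # mirroring the trainer's bookkeeping; the first two epochs get equal weights.
    if index < 2:
        return np.ones(2)
    w_1 = avg_cost[index - 1, 0] / avg_cost[index - 2, 0]
    w_2 = avg_cost[index - 1, 3] / avg_cost[index - 2, 3]
    z = np.exp(w_1 / T) + np.exp(w_2 / T)
    return np.array([2 * np.exp(w_1 / T) / z, 2 * np.exp(w_2 / T) / z])

# Hypothetical history: the semantic loss falls faster than the depth loss,
# so DWA shifts weight towards the depth task.
avg_cost = np.zeros((3, 12))
avg_cost[0, 0], avg_cost[0, 3] = 1.00, 1.00
avg_cost[1, 0], avg_cost[1, 3] = 0.70, 0.90
avg_cost[2, 0], avg_cost[2, 3] = 0.50, 0.85
print(dwa_weights(avg_cost, 2))  # approximately [0.95, 1.05]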
sdmgrad
sdmgrad-main/cityscapes/model_segnet_split.py
import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import argparse import torch.utils.data.sampler as sampler from create_dataset import * from utils import * parser = argparse.ArgumentParser(description='Multi-task: Split') parser.add_argument('--type', default='standard', type=str, help='split type: standard, wide, deep') parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa') parser.add_argument('--dataroot', default='cityscapes', type=str, help='dataset root') parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)') parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2') opt = parser.parse_args() class SegNet(nn.Module): def __init__(self): super(SegNet, self).__init__() # initialise network parameters filter = [64, 128, 256, 512, 512] self.class_nb = 7 # define encoder decoder layers self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])]) self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]])) self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]])) # define convolution layer self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): if i == 0: self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]])) else: self.conv_block_enc.append( nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]), self.conv_layer([filter[i + 1], filter[i + 1]]))) self.conv_block_dec.append( nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]]))) # define task attention layers self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])]) self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])]) self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])]) self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for j in range(2): if j < 1: self.encoder_att.append(nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])) self.decoder_att.append(nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])) for i in range(4): self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]])) self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]])) for i in range(4): if i < 3: self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]])) self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]])) else: self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.pred_task1 = self.conv_layer([filter[0], self.class_nb], pred=True) self.pred_task2 = self.conv_layer([filter[0], 1], pred=True) #self.pred_task3 = self.conv_layer([filter[0], 3], pred=True) # define pooling and unpooling functions self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True) self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2) self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5])) for m 
in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def conv_layer(self, channel, pred=False): if not pred: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), ) return conv_block def att_layer(self, channel): att_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), nn.BatchNorm2d(channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0), nn.BatchNorm2d(channel[2]), nn.Sigmoid(), ) return att_block def forward(self, x): g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5)) for i in range(5): g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2)) # define attention list for tasks atten_encoder, atten_decoder = ([0] * 2 for _ in range(2)) for i in range(2): atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2)) for i in range(2): for j in range(5): atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2)) # define global shared network for i in range(5): if i == 0: g_encoder[i][0] = self.encoder_block[i](x) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) else: g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1]) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) for i in range(5): if i == 0: g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) else: g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) # define task dependent attention module for i in range(2): for j in range(5): if j == 0: atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0]) atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1] atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1]) atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2) else: atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat( (g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1)) atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1] atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1]) atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2) for j in range(5): if j == 0: atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1], scale_factor=2, mode='bilinear', align_corners=True) atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0]) atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat( (g_upsampl[j], atten_decoder[i][j][0]), dim=1)) atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1] else: 
atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2], scale_factor=2, mode='bilinear', align_corners=True) atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0]) atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat( (g_upsampl[j], atten_decoder[i][j][0]), dim=1)) atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1] # define task prediction layers t1_pred = F.log_softmax(self.pred_task1(atten_decoder[0][-1][-1]), dim=1) t2_pred = self.pred_task2(atten_decoder[1][-1][-1]) #t3_pred = self.pred_task3(atten_decoder[2][-1][-1]) #t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True) return [t1_pred, t2_pred], self.logsigma # define model, optimiser and scheduler device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") SegNet_SPLIT = SegNet().to(device) optimizer = optim.Adam(SegNet_SPLIT.parameters(), lr=1e-4) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5) print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_SPLIT), count_parameters(SegNet_SPLIT) / 24981069)) print( 'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30') # define dataset dataset_path = opt.dataroot if opt.apply_augmentation: train_set = CityScapes(root=dataset_path, train=True, augmentation=True) print('Applying data augmentation.') else: train_set = CityScapes(root=dataset_path, train=True) print('Standard training strategy without data augmentation.') test_set = CityScapes(root=dataset_path, train=False) batch_size = 8 train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False) # Train and evaluate multi-task network multi_task_trainer(train_loader, test_loader, SegNet_SPLIT, device, optimizer, scheduler, opt, 200)
11,395
50.103139
119
py
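A small sketch of the per-task attention gating that model_segnet_split.py builds with att_layer: a 1x1-convolution block ending in a sigmoid produces a mask that is multiplied element-wise with the shared feature map. The channel count and spatial size below are illustrative, not taken from the model.

import torch
import torch.nn as nn

att = nn.Sequential(
    nn.Conv2d(64, 64, kernel_size=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True),
    nn.Conv2d(64, 64, kernel_size=1), nn.BatchNorm2d(64), nn.Sigmoid(),
)
shared_feat = torch.randn(2, 64, 32, 32)     # stand-in for a shared encoder feature map
task_feat = att(shared_feat) * shared_feat   # task-specific, attention-gated features
print(task_feat.shape)                       # torch.Size([2, 64, 32, 32])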
sdmgrad
sdmgrad-main/cityscapes/min_norm_solvers.py
# This code is from # Multi-Task Learning as Multi-Objective Optimization # Ozan Sener, Vladlen Koltun # Neural Information Processing Systems (NeurIPS) 2018 import numpy as np import torch class MinNormSolver: MAX_ITER = 20 STOP_CRIT = 1e-5 def _min_norm_element_from2(v1v1, v1v2, v2v2): """ Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2 d is the distance (objective) optimzed v1v1 = <x1,x1> v1v2 = <x1,x2> v2v2 = <x2,x2> """ if v1v2 >= v1v1: # Case: Fig 1, third column gamma = 0.999 cost = v1v1 return gamma, cost if v1v2 >= v2v2: # Case: Fig 1, first column gamma = 0.001 cost = v2v2 return gamma, cost # Case: Fig 1, second column gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2)) cost = v2v2 + gamma * (v1v2 - v2v2) return gamma, cost def _min_norm_2d(vecs, dps): """ Find the minimum norm solution as combination of two points This is correct only in 2D ie. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_1 >= 0 for all i, c_i + c_j = 1.0 for some i, j """ dmin = np.inf for i in range(len(vecs)): for j in range(i + 1, len(vecs)): if (i, j) not in dps: dps[(i, j)] = (vecs[i] * vecs[j]).sum().item() dps[(j, i)] = dps[(i, j)] if (i, i) not in dps: dps[(i, i)] = (vecs[i] * vecs[i]).sum().item() if (j, j) not in dps: dps[(j, j)] = (vecs[j] * vecs[j]).sum().item() c, d = MinNormSolver._min_norm_element_from2(dps[(i, i)], dps[(i, j)], dps[(j, j)]) if d < dmin: dmin = d sol = [(i, j), c, d] return sol, dps def _projection2simplex(y): """ Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i """ m = len(y) sorted_y = np.flip(np.sort(y), axis=0) tmpsum = 0.0 tmax_f = (np.sum(y) - 1.0) / m for i in range(m - 1): tmpsum += sorted_y[i] tmax = (tmpsum - 1) / (i + 1.0) if tmax > sorted_y[i + 1]: tmax_f = tmax break return np.maximum(y - tmax_f, np.zeros(y.shape)) def _next_point(cur_val, grad, n): proj_grad = grad - (np.sum(grad) / n) tm1 = -1.0 * cur_val[proj_grad < 0] / proj_grad[proj_grad < 0] tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0]) skippers = np.sum(tm1 < 1e-7) + np.sum(tm2 < 1e-7) t = 1 if len(tm1[tm1 > 1e-7]) > 0: t = np.min(tm1[tm1 > 1e-7]) if len(tm2[tm2 > 1e-7]) > 0: t = min(t, np.min(tm2[tm2 > 1e-7])) next_point = proj_grad * t + cur_val next_point = MinNormSolver._projection2simplex(next_point) return next_point def find_min_norm_element(vecs): """ Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1. 
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j}) Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence """ # Solution lying at the combination of two points dps = {} init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps) n = len(vecs) sol_vec = np.zeros(n) sol_vec[init_sol[0][0]] = init_sol[1] sol_vec[init_sol[0][1]] = 1 - init_sol[1] if n < 3: # This is optimal for n=2, so return the solution return sol_vec, init_sol[2] iter_count = 0 grad_mat = np.zeros((n, n)) for i in range(n): for j in range(n): grad_mat[i, j] = dps[(i, j)] while iter_count < MinNormSolver.MAX_ITER: grad_dir = -1.0 * np.dot(grad_mat, sol_vec) new_point = MinNormSolver._next_point(sol_vec, grad_dir, n) # Re-compute the inner products for line search v1v1 = 0.0 v1v2 = 0.0 v2v2 = 0.0 for i in range(n): for j in range(n): v1v1 += sol_vec[i] * sol_vec[j] * dps[(i, j)] v1v2 += sol_vec[i] * new_point[j] * dps[(i, j)] v2v2 += new_point[i] * new_point[j] * dps[(i, j)] nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2) new_sol_vec = nc * sol_vec + (1 - nc) * new_point change = new_sol_vec - sol_vec if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT: return sol_vec, nd sol_vec = new_sol_vec def find_min_norm_element_FW(vecs): """ Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1. It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j}) Hence, we find the best 2-task solution, and then run the Frank Wolfe until convergence """ # Solution lying at the combination of two points dps = {} init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps) n = len(vecs) sol_vec = np.zeros(n) sol_vec[init_sol[0][0]] = init_sol[1] sol_vec[init_sol[0][1]] = 1 - init_sol[1] if n < 3: # This is optimal for n=2, so return the solution return sol_vec, init_sol[2] iter_count = 0 grad_mat = np.zeros((n, n)) for i in range(n): for j in range(n): grad_mat[i, j] = dps[(i, j)] while iter_count < MinNormSolver.MAX_ITER: t_iter = np.argmin(np.dot(grad_mat, sol_vec)) v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec)) v1v2 = np.dot(sol_vec, grad_mat[:, t_iter]) v2v2 = grad_mat[t_iter, t_iter] nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2) new_sol_vec = nc * sol_vec new_sol_vec[t_iter] += 1 - nc change = new_sol_vec - sol_vec if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT: return sol_vec, nd sol_vec = new_sol_vec def gradient_normalizers(grads, losses, normalization_type): gn = {} if normalization_type == 'l2': for t in grads: gn[t] = np.sqrt(np.sum([gr.pow(2).sum().data[0] for gr in grads[t]])) elif normalization_type == 'loss': for t in grads: gn[t] = losses[t] elif normalization_type == 'loss+': for t in grads: gn[t] = losses[t] * np.sqrt(np.sum([gr.pow(2).sum().data[0] for gr in grads[t]])) elif normalization_type == 'none': for t in grads: gn[t] = 1.0 else: print('ERROR: Invalid Normalization Type') return gn
7,358
35.979899
147
py
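A usage sketch for the solver above, assuming min_norm_solvers.py is importable; with two toy task gradients the analytic two-point case applies and the returned coefficients describe the minimum-norm point of their convex hull.

import torch
from min_norm_solvers import MinNormSolver

g1 = torch.tensor([1.0, 0.0])   # toy gradient of task 1
g2 = torch.tensor([0.0, 2.0])   # toy gradient of task 2
sol, min_norm = MinNormSolver.find_min_norm_element([g1, g2])
print(sol, min_norm)  # approximately [0.8, 0.2] and 0.8, the squared norm of 0.8*g1 + 0.2*g2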
sdmgrad
sdmgrad-main/cityscapes/model_segnet_mtan.py
import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import argparse import torch.utils.data.sampler as sampler from create_dataset import * from utils import * parser = argparse.ArgumentParser(description='Multi-task: Attention Network') parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa') parser.add_argument('--dataroot', default='cityscapes', type=str, help='dataset root') parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)') parser.add_argument('--seed', default=0, type=int, help='control seed') parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2') opt = parser.parse_args() class SegNet(nn.Module): def __init__(self): super(SegNet, self).__init__() # initialise network parameters filter = [64, 128, 256, 512, 512] self.class_nb = 7 # define encoder decoder layers self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])]) self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]])) self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]])) # define convolution layer self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): if i == 0: self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]])) else: self.conv_block_enc.append( nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]), self.conv_layer([filter[i + 1], filter[i + 1]]))) self.conv_block_dec.append( nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]]))) # define task attention layers self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])]) self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])]) self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])]) self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for j in range(2): if j < 1: self.encoder_att.append(nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])) self.decoder_att.append(nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])) for i in range(4): self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]])) self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]])) for i in range(4): if i < 3: self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]])) self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]])) else: self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.pred_task1 = self.conv_layer([filter[0], self.class_nb], pred=True) self.pred_task2 = self.conv_layer([filter[0], 1], pred=True) #self.pred_task3 = self.conv_layer([filter[0], 3], pred=True) # define pooling and unpooling functions self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True) self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2) self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5])) for m in 
self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def conv_layer(self, channel, pred=False): if not pred: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), ) return conv_block def att_layer(self, channel): att_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), nn.BatchNorm2d(channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0), nn.BatchNorm2d(channel[2]), nn.Sigmoid(), ) return att_block def forward(self, x): g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5)) for i in range(5): g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2)) # define attention list for tasks atten_encoder, atten_decoder = ([0] * 2 for _ in range(2)) for i in range(2): atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2)) for i in range(2): for j in range(5): atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2)) # define global shared network for i in range(5): if i == 0: g_encoder[i][0] = self.encoder_block[i](x) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) else: g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1]) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) for i in range(5): if i == 0: g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) else: g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) # define task dependent attention module for i in range(2): for j in range(5): if j == 0: atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0]) atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1] atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1]) atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2) else: atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat( (g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1)) atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1] atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1]) atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2) for j in range(5): if j == 0: atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1], scale_factor=2, mode='bilinear', align_corners=True) atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0]) atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat( (g_upsampl[j], atten_decoder[i][j][0]), dim=1)) atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1] else: 
atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2], scale_factor=2, mode='bilinear', align_corners=True) atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0]) atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat( (g_upsampl[j], atten_decoder[i][j][0]), dim=1)) atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1] # define task prediction layers t1_pred = F.log_softmax(self.pred_task1(atten_decoder[0][-1][-1]), dim=1) t2_pred = self.pred_task2(atten_decoder[1][-1][-1]) #t3_pred = self.pred_task3(atten_decoder[2][-1][-1]) #t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True) return [t1_pred, t2_pred], self.logsigma control_seed(opt.seed) # define model, optimiser and scheduler device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") SegNet_MTAN = SegNet().to(device) optimizer = optim.Adam(SegNet_MTAN.parameters(), lr=1e-4) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5) print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_MTAN), count_parameters(SegNet_MTAN) / 24981069)) print( 'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30') # define dataset dataset_path = opt.dataroot if opt.apply_augmentation: train_set = CityScapes(root=dataset_path, train=True, augmentation=True) print('Applying data augmentation.') else: train_set = CityScapes(root=dataset_path, train=True) print('Standard training strategy without data augmentation.') test_set = CityScapes(root=dataset_path, train=False) batch_size = 8 train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False) # Train and evaluate multi-task network multi_task_trainer(train_loader, test_loader, SegNet_MTAN, device, optimizer, scheduler, opt, 200)
11,396
49.879464
119
py
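A minimal sketch of how the logsigma parameter defined in the model above enters the uncertainty-weighted loss in the trainers (the 'uncert' option): each task loss is scaled by 1/(2*exp(logsigma_i)) with a logsigma_i/2 regulariser added. Two tasks and the loss values below are illustrative.

import torch

logsigma = torch.tensor([-0.5, -0.5], requires_grad=True)
task_losses = [torch.tensor(1.2), torch.tensor(0.8)]   # made-up per-task losses
loss = sum(1 / (2 * torch.exp(logsigma[i])) * task_losses[i] + logsigma[i] / 2
           for i in range(2))
print(loss.item())  # roughly 1.15 for these numbers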
sdmgrad
sdmgrad-main/cityscapes/create_dataset.py
from torch.utils.data.dataset import Dataset import os import torch import torch.nn.functional as F import fnmatch import numpy as np import random class RandomScaleCrop(object): """ Credit to Jialong Wu from https://github.com/lorenmt/mtan/issues/34. """ def __init__(self, scale=[1.0, 1.2, 1.5]): self.scale = scale def __call__(self, img, label, depth, normal): height, width = img.shape[-2:] sc = self.scale[random.randint(0, len(self.scale) - 1)] h, w = int(height / sc), int(width / sc) i = random.randint(0, height - h) j = random.randint(0, width - w) img_ = F.interpolate(img[None, :, i:i + h, j:j + w], size=(height, width), mode='bilinear', align_corners=True).squeeze(0) label_ = F.interpolate(label[None, None, i:i + h, j:j + w], size=(height, width), mode='nearest').squeeze(0).squeeze(0) depth_ = F.interpolate(depth[None, :, i:i + h, j:j + w], size=(height, width), mode='nearest').squeeze(0) normal_ = F.interpolate(normal[None, :, i:i + h, j:j + w], size=(height, width), mode='bilinear', align_corners=True).squeeze(0) return img_, label_, depth_ / sc, normal_ class RandomScaleCropCityScapes(object): """ Credit to Jialong Wu from https://github.com/lorenmt/mtan/issues/34. """ def __init__(self, scale=[1.0, 1.2, 1.5]): self.scale = scale def __call__(self, img, label, depth): height, width = img.shape[-2:] sc = self.scale[random.randint(0, len(self.scale) - 1)] h, w = int(height / sc), int(width / sc) i = random.randint(0, height - h) j = random.randint(0, width - w) img_ = F.interpolate(img[None, :, i:i + h, j:j + w], size=(height, width), mode='bilinear', align_corners=True).squeeze(0) label_ = F.interpolate(label[None, None, i:i + h, j:j + w], size=(height, width), mode='nearest').squeeze(0).squeeze(0) depth_ = F.interpolate(depth[None, :, i:i + h, j:j + w], size=(height, width), mode='nearest').squeeze(0) return img_, label_, depth_ / sc class NYUv2(Dataset): """ We could further improve the performance with the data augmentation of NYUv2 defined in: [1] PAD-Net: Multi-Tasks Guided Prediction-and-Distillation Network for Simultaneous Depth Estimation and Scene Parsing [2] Pattern affinitive propagation across depth, surface normal and semantic segmentation [3] Mti-net: Multiscale task interaction networks for multi-task learning 1. Random scale in a selected raio 1.0, 1.2, and 1.5. 2. Random horizontal flip. Please note that: all baselines and MTAN did NOT apply data augmentation in the original paper. 
""" def __init__(self, root, train=True, augmentation=False): self.train = train self.root = os.path.expanduser(root) self.augmentation = augmentation # read the data file if train: self.data_path = root + '/train' else: self.data_path = root + '/val' # calculate data length self.data_len = len(fnmatch.filter(os.listdir(self.data_path + '/image'), '*.npy')) def __getitem__(self, index): # load data from the pre-processed npy files image = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/image/{:d}.npy'.format(index)), -1, 0)) semantic = torch.from_numpy(np.load(self.data_path + '/label/{:d}.npy'.format(index))) depth = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/depth/{:d}.npy'.format(index)), -1, 0)) normal = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/normal/{:d}.npy'.format(index)), -1, 0)) # apply data augmentation if required if self.augmentation: image, semantic, depth, normal = RandomScaleCrop()(image, semantic, depth, normal) if torch.rand(1) < 0.5: image = torch.flip(image, dims=[2]) semantic = torch.flip(semantic, dims=[1]) depth = torch.flip(depth, dims=[2]) normal = torch.flip(normal, dims=[2]) normal[0, :, :] = -normal[0, :, :] return image.float(), semantic.float(), depth.float(), normal.float() def __len__(self): return self.data_len class CityScapes(Dataset): """ We could further improve the performance with the data augmentation of NYUv2 defined in: [1] PAD-Net: Multi-Tasks Guided Prediction-and-Distillation Network for Simultaneous Depth Estimation and Scene Parsing [2] Pattern affinitive propagation across depth, surface normal and semantic segmentation [3] Mti-net: Multiscale task interaction networks for multi-task learning 1. Random scale in a selected raio 1.0, 1.2, and 1.5. 2. Random horizontal flip. Please note that: all baselines and MTAN did NOT apply data augmentation in the original paper. """ def __init__(self, root, train=True, augmentation=False): self.train = train self.root = os.path.expanduser(root) self.augmentation = augmentation # read the data file if train: self.data_path = root + '/train' else: self.data_path = root + '/val' # calculate data length self.data_len = len(fnmatch.filter(os.listdir(self.data_path + '/image'), '*.npy')) def __getitem__(self, index): # load data from the pre-processed npy files image = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/image/{:d}.npy'.format(index)), -1, 0)) semantic = torch.from_numpy(np.load(self.data_path + '/label_7/{:d}.npy'.format(index))) depth = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/depth/{:d}.npy'.format(index)), -1, 0)) # apply data augmentation if required if self.augmentation: image, semantic, depth = RandomScaleCropCityScapes()(image, semantic, depth) if torch.rand(1) < 0.5: image = torch.flip(image, dims=[2]) semantic = torch.flip(semantic, dims=[1]) depth = torch.flip(depth, dims=[2]) return image.float(), semantic.float(), depth.float() def __len__(self): return self.data_len
6,513
41.298701
127
py
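A quick usage sketch of the CityScapes augmentation above on dummy tensors shaped like one sample (3xHxW image, HxW label map, 1xHxW depth map); it assumes create_dataset.py is importable, and the sizes are illustrative.

import torch
from create_dataset import RandomScaleCropCityScapes

img = torch.rand(3, 128, 256)                      # dummy image
label = torch.randint(0, 7, (128, 256)).float()    # dummy 7-class label map
depth = torch.rand(1, 128, 256)                    # dummy depth map
img_a, label_a, depth_a = RandomScaleCropCityScapes()(img, label, depth)
print(img_a.shape, label_a.shape, depth_a.shape)   # shapes are preserved; depth is rescaled by 1/sc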
sdmgrad
sdmgrad-main/cityscapes/model_segnet_cross.py
import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import argparse import torch.utils.data.sampler as sampler from create_dataset import * from utils import * parser = argparse.ArgumentParser(description='Multi-task: Cross') parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa') parser.add_argument('--dataroot', default='cityscapes', type=str, help='dataset root') parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)') parser.add_argument('--seed', default=0, type=int, help='control seed') parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2') opt = parser.parse_args() class SegNet(nn.Module): def __init__(self): super(SegNet, self).__init__() # initialise network parameters filter = [64, 128, 256, 512, 512] self.class_nb = 7 # define encoder decoder layers self.encoder_block_t = nn.ModuleList( [nn.ModuleList([self.conv_layer([3, filter[0], filter[0]], bottle_neck=True)])]) self.decoder_block_t = nn.ModuleList( [nn.ModuleList([self.conv_layer([filter[0], filter[0], filter[0]], bottle_neck=True)])]) for j in range(2): if j < 1: self.encoder_block_t.append( nn.ModuleList([self.conv_layer([3, filter[0], filter[0]], bottle_neck=True)])) self.decoder_block_t.append( nn.ModuleList([self.conv_layer([filter[0], filter[0], filter[0]], bottle_neck=True)])) for i in range(4): if i == 0: self.encoder_block_t[j].append( self.conv_layer([filter[i], filter[i + 1], filter[i + 1]], bottle_neck=True)) self.decoder_block_t[j].append( self.conv_layer([filter[i + 1], filter[i], filter[i]], bottle_neck=True)) else: self.encoder_block_t[j].append( self.conv_layer([filter[i], filter[i + 1], filter[i + 1]], bottle_neck=False)) self.decoder_block_t[j].append( self.conv_layer([filter[i + 1], filter[i], filter[i]], bottle_neck=False)) # define cross-stitch units self.cs_unit_encoder = nn.Parameter(data=torch.ones(4, 2)) self.cs_unit_decoder = nn.Parameter(data=torch.ones(5, 2)) # define task specific layers self.pred_task1 = self.conv_layer([filter[0], self.class_nb], bottle_neck=True, pred_layer=True) self.pred_task2 = self.conv_layer([filter[0], 1], bottle_neck=True, pred_layer=True) #self.pred_task3 = self.conv_layer([filter[0], 3], bottle_neck=True, pred_layer=True) # define pooling and unpooling functions self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True) self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2) self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5])) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Parameter): nn.init.constant(m.weight, 1) def conv_layer(self, channel, bottle_neck, pred_layer=False): if bottle_neck: if not pred_layer: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=3, padding=1), nn.BatchNorm2d(channel[2]), nn.ReLU(inplace=True), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=channel[0], 
out_channels=channel[1], kernel_size=1, padding=0), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=3, padding=1), nn.BatchNorm2d(channel[2]), nn.ReLU(inplace=True), ) return conv_block def forward(self, x): encoder_conv_t, decoder_conv_t, encoder_samp_t, decoder_samp_t, indices_t = ([0] * 2 for _ in range(5)) for i in range(2): encoder_conv_t[i], decoder_conv_t[i], encoder_samp_t[i], decoder_samp_t[i], indices_t[i] = ( [0] * 5 for _ in range(5)) # task branch 1 for i in range(5): for j in range(2): if i == 0: encoder_conv_t[j][i] = self.encoder_block_t[j][i](x) encoder_samp_t[j][i], indices_t[j][i] = self.down_sampling(encoder_conv_t[j][i]) else: encoder_cross_stitch = self.cs_unit_encoder[i - 1][0] * encoder_samp_t[0][i - 1] + \ self.cs_unit_encoder[i - 1][1] * encoder_samp_t[1][i - 1] #self.cs_unit_encoder[i - 1][2] * encoder_samp_t[2][i - 1] encoder_conv_t[j][i] = self.encoder_block_t[j][i](encoder_cross_stitch) encoder_samp_t[j][i], indices_t[j][i] = self.down_sampling(encoder_conv_t[j][i]) for i in range(5): for j in range(2): if i == 0: decoder_cross_stitch = self.cs_unit_decoder[i][0] * encoder_samp_t[0][-1] + \ self.cs_unit_decoder[i][1] * encoder_samp_t[1][-1] #self.cs_unit_decoder[i][2] * encoder_samp_t[2][-1] decoder_samp_t[j][i] = self.up_sampling(decoder_cross_stitch, indices_t[j][-i - 1]) decoder_conv_t[j][i] = self.decoder_block_t[j][-i - 1](decoder_samp_t[j][i]) else: decoder_cross_stitch = self.cs_unit_decoder[i][0] * decoder_conv_t[0][i - 1] + \ self.cs_unit_decoder[i][1] * decoder_conv_t[1][i - 1] #self.cs_unit_decoder[i][2] * decoder_conv_t[2][i - 1] decoder_samp_t[j][i] = self.up_sampling(decoder_cross_stitch, indices_t[j][-i - 1]) decoder_conv_t[j][i] = self.decoder_block_t[j][-i - 1](decoder_samp_t[j][i]) # define task prediction layers t1_pred = F.log_softmax(self.pred_task1(decoder_conv_t[0][-1]), dim=1) t2_pred = self.pred_task2(decoder_conv_t[1][-1]) #t3_pred = self.pred_task3(decoder_conv_t[2][-1]) #t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True) return [t1_pred, t2_pred], self.logsigma control_seed(opt.seed) # define model, optimiser and scheduler device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") SegNet_CROSS = SegNet().to(device) optimizer = optim.Adam(SegNet_CROSS.parameters(), lr=1e-4) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5) print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_CROSS), count_parameters(SegNet_CROSS) / 24981069)) print( 'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30') # define dataset dataset_path = opt.dataroot if opt.apply_augmentation: train_set = CityScapes(root=dataset_path, train=True, augmentation=True) print('Applying data augmentation on CityScapes.') else: train_set = CityScapes(root=dataset_path, train=True) print('Standard training strategy without data augmentation.') test_set = CityScapes(root=dataset_path, train=False) batch_size = 8 train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False) # Train 
and evaluate multi-task network multi_task_trainer(train_loader, test_loader, SegNet_CROSS, device, optimizer, scheduler, opt, 200)
9,044
48.42623
119
py
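A minimal sketch of the cross-stitch combination that model_segnet_cross.py applies between its two task branches: a pair of learned scalars mixes the branch activations before they enter the next block. Shapes below are illustrative.

import torch
import torch.nn as nn

cs_unit = nn.Parameter(torch.ones(2))        # like one row of cs_unit_encoder
feat_task1 = torch.randn(2, 64, 32, 32)      # made-up activations of branch 1
feat_task2 = torch.randn(2, 64, 32, 32)      # made-up activations of branch 2
mixed = cs_unit[0] * feat_task1 + cs_unit[1] * feat_task2
print(mixed.shape)                           # torch.Size([2, 64, 32, 32])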
sdmgrad
sdmgrad-main/cityscapes/model_segnet_mt.py
import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import argparse import torch.utils.data.sampler as sampler from create_dataset import * from utils import * parser = argparse.ArgumentParser(description='Multi-task: Attention Network') parser.add_argument('--method', default='sdmgrad', type=str, help='which optimization algorithm to use') parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa') parser.add_argument('--dataroot', default='cityscapes', type=str, help='dataset root') parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)') parser.add_argument('--alpha', default=0.3, type=float, help='the alpha') parser.add_argument('--lr', default=1e-4, type=float, help='the learning rate') parser.add_argument('--seed', default=1, type=int, help='control seed') parser.add_argument('--niter', default=20, type=int, help='number of inner iteration') parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2') opt = parser.parse_args() class SegNet(nn.Module): def __init__(self): super(SegNet, self).__init__() # initialise network parameters filter = [64, 128, 256, 512, 512] self.class_nb = 7 # define encoder decoder layers self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])]) self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]])) self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]])) # define convolution layer self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for i in range(4): if i == 0: self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]])) else: self.conv_block_enc.append( nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]), self.conv_layer([filter[i + 1], filter[i + 1]]))) self.conv_block_dec.append( nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]]))) # define task attention layers self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])]) self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])]) self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])]) self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])]) for j in range(2): if j < 1: self.encoder_att.append(nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])) self.decoder_att.append(nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])) for i in range(4): self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]])) self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]])) for i in range(4): if i < 3: self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]])) self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]])) else: self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]])) self.pred_task1 = self.conv_layer([filter[0], self.class_nb], pred=True) self.pred_task2 = self.conv_layer([filter[0], 
1], pred=True) #self.pred_task3 = self.conv_layer([filter[0], 3], pred=True) # define pooling and unpooling functions self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True) self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2) self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5])) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def shared_modules(self): return [ self.encoder_block, self.decoder_block, self.conv_block_enc, self.conv_block_dec, self.encoder_block_att, self.decoder_block_att, self.down_sampling, self.up_sampling ] def zero_grad_shared_modules(self): for mm in self.shared_modules(): mm.zero_grad() def conv_layer(self, channel, pred=False): if not pred: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1), nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True), ) else: conv_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1), nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), ) return conv_block def att_layer(self, channel): att_block = nn.Sequential( nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0), nn.BatchNorm2d(channel[1]), nn.ReLU(inplace=True), nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0), nn.BatchNorm2d(channel[2]), nn.Sigmoid(), ) return att_block def forward(self, x): g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5)) for i in range(5): g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2)) # define attention list for tasks atten_encoder, atten_decoder = ([0] * 2 for _ in range(2)) for i in range(2): atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2)) for i in range(2): for j in range(5): atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2)) # define global shared network for i in range(5): if i == 0: g_encoder[i][0] = self.encoder_block[i](x) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) else: g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1]) g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0]) g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1]) for i in range(5): if i == 0: g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) else: g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1]) g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i]) g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0]) # define task dependent attention module for i in range(2): for j in range(5): if j == 0: atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0]) atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1] atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1]) atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2) else: atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat( (g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1)) atten_encoder[i][j][1] = 
(atten_encoder[i][j][0]) * g_encoder[j][1] atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1]) atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2) for j in range(5): if j == 0: atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1], scale_factor=2, mode='bilinear', align_corners=True) atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0]) atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat( (g_upsampl[j], atten_decoder[i][j][0]), dim=1)) atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1] else: atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2], scale_factor=2, mode='bilinear', align_corners=True) atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0]) atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat( (g_upsampl[j], atten_decoder[i][j][0]), dim=1)) atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1] # define task prediction layers t1_pred = F.log_softmax(self.pred_task1(atten_decoder[0][-1][-1]), dim=1) t2_pred = self.pred_task2(atten_decoder[1][-1][-1]) #t3_pred = self.pred_task3(atten_decoder[2][-1][-1]) #t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True) return [t1_pred, t2_pred], self.logsigma control_seed(opt.seed) # define model, optimiser and scheduler device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") SegNet_MTAN = SegNet().to(device) optimizer = optim.Adam(SegNet_MTAN.parameters(), lr=opt.lr) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5) print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_MTAN), count_parameters(SegNet_MTAN) / 24981069)) print( 'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30') # define dataset dataset_path = opt.dataroot if opt.apply_augmentation: train_set = CityScapes(root=dataset_path, train=True, augmentation=True) print('Applying data augmentation.') else: train_set = CityScapes(root=dataset_path, train=True) print('Standard training strategy without data augmentation.') test_set = CityScapes(root=dataset_path, train=False) batch_size = 8 train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False) # Train and evaluate multi-task network multi_task_rg_trainer(train_loader, test_loader, SegNet_MTAN, device, optimizer, scheduler, opt, 200)
12,105
49.865546
119
py
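A small sketch of the grad2vec idea that this model supports through shared_modules(): the gradients of all shared parameters are flattened into a single vector per task so the trainer can combine them. The tiny network and loss below are stand-ins, not the SegNet.

import torch
import torch.nn as nn

shared = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))   # stand-in shared trunk
x = torch.randn(5, 4)
loss = shared(x).pow(2).mean()
loss.backward()
flat_grad = torch.cat([p.grad.reshape(-1) for p in shared.parameters()])
print(flat_grad.shape)   # one entry per shared parameter element (58 here)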
SyNet
SyNet-master/CenterNet/src/main.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import _init_paths

import os

import torch
import torch.utils.data
from opts import opts
from models.model import create_model, load_model, save_model
from models.data_parallel import DataParallel
from logger import Logger
from datasets.dataset_factory import get_dataset
from trains.train_factory import train_factory


def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset, opt.task)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(
            model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up data...')
    val_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'val'),
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=True
    )

    if opt.test:
        _, preds = trainer.val(0, val_loader)
        val_loader.dataset.run_eval(preds, opt.save_dir)
        return

    train_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'train'),
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True
    )

    print('Starting training...')
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'),
                           epoch, model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'),
                       epoch, model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                       epoch, model, optimizer)
            lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()


if __name__ == '__main__':
    opt = opts().parse()
    main(opt)
3,348
31.833333
78
py
SyNet
SyNet-master/CenterNet/src/demo.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import _init_paths

import os
import cv2

from opts import opts
from detectors.detector_factory import detector_factory

image_ext = ['jpg', 'jpeg', 'png', 'webp']
video_ext = ['mp4', 'mov', 'avi', 'mkv']
time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']


def demo(opt):
  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  opt.debug = max(opt.debug, 1)
  Detector = detector_factory[opt.task]
  detector = Detector(opt)

  if opt.demo == 'webcam' or \
     opt.demo[opt.demo.rfind('.') + 1:].lower() in video_ext:
    cam = cv2.VideoCapture(0 if opt.demo == 'webcam' else opt.demo)
    detector.pause = False
    while True:
      _, img = cam.read()
      cv2.imshow('input', img)
      ret = detector.run(img)
      time_str = ''
      for stat in time_stats:
        time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
      print(time_str)
      if cv2.waitKey(1) == 27:
        return  # esc to quit
  else:
    if os.path.isdir(opt.demo):
      image_names = []
      ls = os.listdir(opt.demo)
      for file_name in sorted(ls):
        ext = file_name[file_name.rfind('.') + 1:].lower()
        if ext in image_ext:
          image_names.append(os.path.join(opt.demo, file_name))
    else:
      image_names = [opt.demo]

    for (image_name) in image_names:
      ret = detector.run(image_name)
      time_str = ''
      for stat in time_stats:
        time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
      print(time_str)


if __name__ == '__main__':
  opt = opts().init()
  demo(opt)
1,674
28.385965
70
py
SyNet
SyNet-master/CenterNet/src/tools/merge_pascal_json.py
import json

# ANNOT_PATH = '/home/zxy/Datasets/VOC/annotations/'
ANNOT_PATH = 'voc/annotations/'
OUT_PATH = ANNOT_PATH
INPUT_FILES = ['pascal_train2012.json', 'pascal_val2012.json',
               'pascal_train2007.json', 'pascal_val2007.json']
OUTPUT_FILE = 'pascal_trainval0712.json'
KEYS = ['images', 'type', 'annotations', 'categories']
MERGE_KEYS = ['images', 'annotations']

out = {}
tot_anns = 0
for i, file_name in enumerate(INPUT_FILES):
  data = json.load(open(ANNOT_PATH + file_name, 'r'))
  print('keys', data.keys())
  if i == 0:
    for key in KEYS:
      out[key] = data[key]
      print(file_name, key, len(data[key]))
  else:
    out['images'] += data['images']
    for j in range(len(data['annotations'])):
      data['annotations'][j]['id'] += tot_anns
    out['annotations'] += data['annotations']
    print(file_name, 'images', len(data['images']))
    print(file_name, 'annotations', len(data['annotations']))
  tot_anns = len(out['annotations'])
print('tot', len(out['annotations']))
json.dump(out, open(OUT_PATH + OUTPUT_FILE, 'w'))
1,058
33.16129
62
py
SyNet
SyNet-master/CenterNet/src/tools/eval_coco_hp.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import sys
import cv2
import numpy as np
import pickle
import os

this_dir = os.path.dirname(__file__)
# Build the annotation path with os.path.join; plain string concatenation
# dropped the separator after this_dir and only worked when this_dir was empty.
ANN_PATH = os.path.join(this_dir, '..', '..', 'data', 'coco', 'annotations',
                        'person_keypoints_val2017.json')
print(ANN_PATH)

if __name__ == '__main__':
  pred_path = sys.argv[1]
  coco = coco.COCO(ANN_PATH)
  dets = coco.loadRes(pred_path)
  img_ids = coco.getImgIds()
  num_images = len(img_ids)
  coco_eval = COCOeval(coco, dets, "keypoints")
  coco_eval.evaluate()
  coco_eval.accumulate()
  coco_eval.summarize()
  coco_eval = COCOeval(coco, dets, "bbox")
  coco_eval.evaluate()
  coco_eval.accumulate()
  coco_eval.summarize()
795
24.677419
81
py
SyNet
SyNet-master/CenterNet/src/tools/reval.py
# Fast R-CNN
# Modified by Xingyi Zhou
# Reval = re-eval. Re-evaluate saved detections.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
import os.path as osp
sys.path.insert(0, osp.join(osp.dirname(__file__), 'voc_eval_lib'))

from model.test import apply_nms
from datasets.pascal_voc import pascal_voc
import pickle
import os, argparse
import numpy as np
import json


def parse_args():
  """
  Parse input arguments
  """
  parser = argparse.ArgumentParser(description='Re-evaluate results')
  parser.add_argument('detection_file', type=str)
  parser.add_argument('--output_dir', help='results directory', type=str)
  parser.add_argument('--imdb', dest='imdb_name',
                      help='dataset to re-evaluate',
                      default='voc_2007_test', type=str)
  parser.add_argument('--matlab', dest='matlab_eval',
                      help='use matlab for evaluation',
                      action='store_true')
  parser.add_argument('--comp', dest='comp_mode', help='competition mode',
                      action='store_true')
  parser.add_argument('--nms', dest='apply_nms', help='apply nms',
                      action='store_true')

  if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(1)

  args = parser.parse_args()
  return args


def from_dets(imdb_name, detection_file, args):
  imdb = pascal_voc('test', '2007')
  imdb.competition_mode(args.comp_mode)
  imdb.config['matlab_eval'] = args.matlab_eval
  with open(os.path.join(detection_file), 'rb') as f:
    if 'json' in detection_file:
      dets = json.load(f)
    else:
      dets = pickle.load(f, encoding='latin1')
  # import pdb; pdb.set_trace()
  if args.apply_nms:
    print('Applying NMS to all detections')
    test_nms = 0.3
    nms_dets = apply_nms(dets, test_nms)
  else:
    nms_dets = dets

  print('Evaluating detections')
  imdb.evaluate_detections(nms_dets)


if __name__ == '__main__':
  args = parse_args()
  imdb_name = args.imdb_name
  from_dets(imdb_name, args.detection_file, args)
2,331
28.518987
74
py
SyNet
SyNet-master/CenterNet/src/tools/convert_kitti_to_coco.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import pickle import json import numpy as np import cv2 DATA_PATH = '../../data/kitti/' DEBUG = False # VAL_PATH = DATA_PATH + 'training/label_val/' import os SPLITS = ['3dop', 'subcnn'] import _init_paths from utils.ddd_utils import compute_box_3d, project_to_image, alpha2rot_y from utils.ddd_utils import draw_box_3d, unproject_2d_to_3d ''' #Values Name Description ---------------------------------------------------------------------------- 1 type Describes the type of object: 'Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', 'Misc' or 'DontCare' 1 truncated Float from 0 (non-truncated) to 1 (truncated), where truncated refers to the object leaving image boundaries 1 occluded Integer (0,1,2,3) indicating occlusion state: 0 = fully visible, 1 = partly occluded 2 = largely occluded, 3 = unknown 1 alpha Observation angle of object, ranging [-pi..pi] 4 bbox 2D bounding box of object in the image (0-based index): contains left, top, right, bottom pixel coordinates 3 dimensions 3D object dimensions: height, width, length (in meters) 3 location 3D object location x,y,z in camera coordinates (in meters) 1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi] 1 score Only for results: Float, indicating confidence in detection, needed for p/r curves, higher is better. ''' def _bbox_to_coco_bbox(bbox): return [(bbox[0]), (bbox[1]), (bbox[2] - bbox[0]), (bbox[3] - bbox[1])] def read_clib(calib_path): f = open(calib_path, 'r') for i, line in enumerate(f): if i == 2: calib = np.array(line[:-1].split(' ')[1:], dtype=np.float32) calib = calib.reshape(3, 4) return calib cats = ['Pedestrian', 'Car', 'Cyclist', 'Van', 'Truck', 'Person_sitting', 'Tram', 'Misc', 'DontCare'] cat_ids = {cat: i + 1 for i, cat in enumerate(cats)} # cat_info = [{"name": "pedestrian", "id": 1}, {"name": "vehicle", "id": 2}] F = 721 H = 384 # 375 W = 1248 # 1242 EXT = [45.75, -0.34, 0.005] CALIB = np.array([[F, 0, W / 2, EXT[0]], [0, F, H / 2, EXT[1]], [0, 0, 1, EXT[2]]], dtype=np.float32) cat_info = [] for i, cat in enumerate(cats): cat_info.append({'name': cat, 'id': i + 1}) for SPLIT in SPLITS: image_set_path = DATA_PATH + 'ImageSets_{}/'.format(SPLIT) ann_dir = DATA_PATH + 'training/label_2/' calib_dir = DATA_PATH + '{}/calib/' splits = ['train', 'val'] # splits = ['trainval', 'test'] calib_type = {'train': 'training', 'val': 'training', 'trainval': 'training', 'test': 'testing'} for split in splits: ret = {'images': [], 'annotations': [], "categories": cat_info} image_set = open(image_set_path + '{}.txt'.format(split), 'r') image_to_id = {} for line in image_set: if line[-1] == '\n': line = line[:-1] image_id = int(line) calib_path = calib_dir.format(calib_type[split]) + '{}.txt'.format(line) calib = read_clib(calib_path) image_info = {'file_name': '{}.png'.format(line), 'id': int(image_id), 'calib': calib.tolist()} ret['images'].append(image_info) if split == 'test': continue ann_path = ann_dir + '{}.txt'.format(line) # if split == 'val': # os.system('cp {} {}/'.format(ann_path, VAL_PATH)) anns = open(ann_path, 'r') if DEBUG: image = cv2.imread( DATA_PATH + 'images/trainval/' + image_info['file_name']) for ann_ind, txt in enumerate(anns): tmp = txt[:-1].split(' ') cat_id = cat_ids[tmp[0]] truncated = int(float(tmp[1])) occluded = int(tmp[2]) alpha = float(tmp[3]) bbox = [float(tmp[4]), float(tmp[5]), float(tmp[6]), float(tmp[7])] dim = [float(tmp[8]), float(tmp[9]), float(tmp[10])] 
location = [float(tmp[11]), float(tmp[12]), float(tmp[13])] rotation_y = float(tmp[14]) ann = {'image_id': image_id, 'id': int(len(ret['annotations']) + 1), 'category_id': cat_id, 'dim': dim, 'bbox': _bbox_to_coco_bbox(bbox), 'depth': location[2], 'alpha': alpha, 'truncated': truncated, 'occluded': occluded, 'location': location, 'rotation_y': rotation_y} ret['annotations'].append(ann) if DEBUG and tmp[0] != 'DontCare': box_3d = compute_box_3d(dim, location, rotation_y) box_2d = project_to_image(box_3d, calib) # print('box_2d', box_2d) image = draw_box_3d(image, box_2d) x = (bbox[0] + bbox[2]) / 2 ''' print('rot_y, alpha2rot_y, dlt', tmp[0], rotation_y, alpha2rot_y(alpha, x, calib[0, 2], calib[0, 0]), np.cos( rotation_y - alpha2rot_y(alpha, x, calib[0, 2], calib[0, 0]))) ''' depth = np.array([location[2]], dtype=np.float32) pt_2d = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32) pt_3d = unproject_2d_to_3d(pt_2d, depth, calib) pt_3d[1] += dim[0] / 2 print('pt_3d', pt_3d) print('location', location) if DEBUG: cv2.imshow('image', image) cv2.waitKey() print("# images: ", len(ret['images'])) print("# annotations: ", len(ret['annotations'])) # import pdb; pdb.set_trace() out_path = '{}/annotations/kitti_{}_{}.json'.format(DATA_PATH, SPLIT, split) json.dump(ret, open(out_path, 'w'))
5,935
37.797386
80
py
SyNet
SyNet-master/CenterNet/src/tools/calc_coco_overlap.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import pycocotools.coco as COCO import cv2 import numpy as np from pycocotools import mask as maskUtils ANN_PATH = '../../data/coco/annotations/' IMG_PATH = '../../data/coco/' ANN_FILES = {'train': 'instances_train2017.json', 'val': 'instances_val2017.json'} DEBUG = False RESIZE = True class_name = [ '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush' ] def iou(box1, box2): area1 = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1) area2 = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1) inter = max(min(box1[2], box2[2]) - max(box1[0], box2[0]) + 1, 0) * \ max(min(box1[3], box2[3]) - max(box1[1], box2[1]) + 1, 0) iou = 1.0 * inter / (area1 + area2 - inter) return iou def generate_anchors( stride=16, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2) ): """Generates a matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors are centered on stride / 2, have (approximate) sqrt areas of the specified sizes, and aspect ratios as given. """ return _generate_anchors( stride, np.array(sizes, dtype=np.float) / stride, np.array(aspect_ratios, dtype=np.float) ) def _generate_anchors(base_size, scales, aspect_ratios): """Generate anchor (reference) windows by enumerating aspect ratios X scales wrt a reference (0, 0, base_size - 1, base_size - 1) window. """ anchor = np.array([1, 1, base_size, base_size], dtype=np.float) - 1 anchors = _ratio_enum(anchor, aspect_ratios) anchors = np.vstack( [_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])] ) return anchors def _whctrs(anchor): """Return width, height, x center, and y center for an anchor (window).""" w = anchor[2] - anchor[0] + 1 h = anchor[3] - anchor[1] + 1 x_ctr = anchor[0] + 0.5 * (w - 1) y_ctr = anchor[1] + 0.5 * (h - 1) return w, h, x_ctr, y_ctr def _mkanchors(ws, hs, x_ctr, y_ctr): """Given a vector of widths (ws) and heights (hs) around a center (x_ctr, y_ctr), output a set of anchors (windows). 
""" ws = ws[:, np.newaxis] hs = hs[:, np.newaxis] anchors = np.hstack( ( x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1) ) ) return anchors def _ratio_enum(anchor, ratios): """Enumerate a set of anchors for each aspect ratio wrt an anchor.""" w, h, x_ctr, y_ctr = _whctrs(anchor) size = w * h size_ratios = size / ratios ws = np.round(np.sqrt(size_ratios)) hs = np.round(ws * ratios) anchors = _mkanchors(ws, hs, x_ctr, y_ctr) return anchors def _scale_enum(anchor, scales): """Enumerate a set of anchors for each scale wrt an anchor.""" w, h, x_ctr, y_ctr = _whctrs(anchor) ws = w * scales hs = h * scales anchors = _mkanchors(ws, hs, x_ctr, y_ctr) return anchors def _coco_box_to_bbox(box): bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32) return bbox def count_agnostic(split): coco = COCO.COCO(ANN_PATH + ANN_FILES[split]) images = coco.getImgIds() cnt = 0 for img_id in images: ann_ids = coco.getAnnIds(imgIds=[img_id]) anns = coco.loadAnns(ids=ann_ids) centers = [] for ann in anns: bbox = ann['bbox'] center = ((bbox[0] + bbox[2] / 2) // 4, (bbox[1] + bbox[3] / 2) // 4) for c in centers: if center[0] == c[0] and center[1] == c[1]: cnt += 1 centers.append(center) print('find {} collisions!'.format(cnt)) def count(split): coco = COCO.COCO(ANN_PATH + ANN_FILES[split]) images = coco.getImgIds() cnt = 0 obj = 0 for img_id in images: ann_ids = coco.getAnnIds(imgIds=[img_id]) anns = coco.loadAnns(ids=ann_ids) centers = [] obj += len(anns) for ann in anns: if ann['iscrowd'] > 0: continue bbox = ann['bbox'] center = ((bbox[0] + bbox[2] / 2) // 4, (bbox[1] + bbox[3] / 2) // 4, ann['category_id'], bbox) for c in centers: if center[0] == c[0] and center[1] == c[1] and center[2] == c[2] and \ iou(_coco_box_to_bbox(bbox), _coco_box_to_bbox(c[3])) < 2:# 0.5: cnt += 1 if DEBUG: file_name = coco.loadImgs(ids=[img_id])[0]['file_name'] img = cv2.imread('{}/{}2017/{}'.format(IMG_PATH, split, file_name)) x1, y1 = int(c[3][0]), int(c[3][1]), x2, y2 = int(c[3][0] + c[3][2]), int(c[3][1] + c[3][3]) cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2, cv2.LINE_AA) x1, y1 = int(center[3][0]), int(center[3][1]), x2, y2 = int(center[3][0] + center[3][2]), int(center[3][1] + center[3][3]) cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2, cv2.LINE_AA) cv2.imshow('img', img) cv2.waitKey() centers.append(center) print('find {} collisions of {} objects!'.format(cnt, obj)) def count_iou(split): coco = COCO.COCO(ANN_PATH + ANN_FILES[split]) images = coco.getImgIds() cnt = 0 obj = 0 for img_id in images: ann_ids = coco.getAnnIds(imgIds=[img_id]) anns = coco.loadAnns(ids=ann_ids) bboxes = [] obj += len(anns) for ann in anns: if ann['iscrowd'] > 0: continue bbox = _coco_box_to_bbox(ann['bbox']).tolist() + [ann['category_id']] for b in bboxes: if iou(b, bbox) > 0.5 and b[4] == bbox[4]: cnt += 1 if DEBUG: file_name = coco.loadImgs(ids=[img_id])[0]['file_name'] img = cv2.imread('{}/{}2017/{}'.format(IMG_PATH, split, file_name)) x1, y1 = int(b[0]), int(b[1]), x2, y2 = int(b[2]), int(b[3]) cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2, cv2.LINE_AA) x1, y1 = int(bbox[0]), int(bbox[1]), x2, y2 = int(bbox[2]), int(bbox[3]) cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2, cv2.LINE_AA) cv2.imshow('img', img) print('cats', class_name[b[4]], class_name[bbox[4]]) cv2.waitKey() bboxes.append(bbox) print('find {} collisions of {} objects!'.format(cnt, obj)) def count_anchor(split): coco = COCO.COCO(ANN_PATH + ANN_FILES[split]) images = 
coco.getImgIds() cnt = 0 obj = 0 stride = 16 anchor = generate_anchors().reshape(15, 2, 2) miss_s, miss_m, miss_l = 0, 0, 0 N = len(images) print(N, 'images') for ind, img_id in enumerate(images): if ind % 1000 == 0: print(ind, N) anchors = [] ann_ids = coco.getAnnIds(imgIds=[img_id]) anns = coco.loadAnns(ids=ann_ids) obj += len(anns) img_info = coco.loadImgs(ids=[img_id])[0] h, w = img_info['height'], img_info['width'] if RESIZE: if h > w: for i in range(len(anns)): anns[i]['bbox'][0] *= 800 / w anns[i]['bbox'][1] *= 800 / w anns[i]['bbox'][2] *= 800 / w anns[i]['bbox'][3] *= 800 / w h = h * 800 // w w = 800 else: for i in range(len(anns)): anns[i]['bbox'][0] *= 800 / h anns[i]['bbox'][1] *= 800 / h anns[i]['bbox'][2] *= 800 / h anns[i]['bbox'][3] *= 800 / h w = w * 800 // h h = 800 for i in range(w // stride): for j in range(h // stride): ct = np.array([i * stride, j * stride], dtype=np.float32).reshape(1, 1, 2) anchors.append(anchor + ct) anchors = np.concatenate(anchors, axis=0).reshape(-1, 4) anchors[:, 2:4] = anchors[:, 2:4] - anchors[:, 0:2] anchors = anchors.tolist() # import pdb; pdb.set_trace() g = [g['bbox'] for g in anns] iscrowd = [int(o['iscrowd']) for o in anns] ious = maskUtils.iou(anchors,g,iscrowd) for t in range(len(g)): if ious[:, t].max() < 0.5: s = anns[t]['area'] if s < 32 ** 2: miss_s += 1 elif s < 96 ** 2: miss_m += 1 else: miss_l += 1 if DEBUG: file_name = coco.loadImgs(ids=[img_id])[0]['file_name'] img = cv2.imread('{}/{}2017/{}'.format(IMG_PATH, split, file_name)) if RESIZE: img = cv2.resize(img, (w, h)) for t, gt in enumerate(g): if anns[t]['iscrowd'] > 0: continue x1, y1, x2, y2 = _coco_box_to_bbox(gt) cl = (0, 0, 255) if ious[:, t].max() < 0.5 else (0, 255, 0) cv2.rectangle(img, (x1, y1), (x2, y2), cl, 2, cv2.LINE_AA) for k in range(len(anchors)): if ious[k, t] > 0.5: x1, y1, x2, y2 = _coco_box_to_bbox(anchors[k]) cl = (np.array([255, 0, 0]) * ious[k, t]).astype(np.int32).tolist() cv2.rectangle(img, (x1, y1), (x2, y2), cl, 1, cv2.LINE_AA) cv2.imshow('img', img) cv2.waitKey() miss = 0 if len(ious) > 0: miss = (ious.max(axis=0) < 0.5).sum() cnt += miss print('cnt, obj, ratio ', cnt, obj, cnt / obj) print('s, m, l ', miss_s, miss_m, miss_l) # import pdb; pdb.set_trace() def count_size(split): coco = COCO.COCO(ANN_PATH + ANN_FILES[split]) images = coco.getImgIds() cnt = 0 obj = 0 stride = 16 anchor = generate_anchors().reshape(15, 2, 2) cnt_s, cnt_m, cnt_l = 0, 0, 0 N = len(images) print(N, 'images') for ind, img_id in enumerate(images): anchors = [] ann_ids = coco.getAnnIds(imgIds=[img_id]) anns = coco.loadAnns(ids=ann_ids) obj += len(anns) img_info = coco.loadImgs(ids=[img_id])[0] for t in range(len(anns)): if 1: s = anns[t]['area'] if s < 32 ** 2: cnt_s += 1 elif s < 96 ** 2: cnt_m += 1 else: cnt_l += 1 cnt += 1 print('cnt', cnt) print('s, m, l ', cnt_s, cnt_m, cnt_l) # count_iou('train') # count_anchor('train') # count('train') count_size('train')
10,869
32.653251
101
py
SyNet
SyNet-master/CenterNet/src/tools/convert_hourglass_weight.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

MODEL_PATH = '../../models/ExtremeNet_500000.pkl'
OUT_PATH = '../../models/ExtremeNet_500000.pth'

import torch

state_dict = torch.load(MODEL_PATH)
key_map = {'t_heats': 'hm_t', 'l_heats': 'hm_l', 'b_heats': 'hm_b', \
           'r_heats': 'hm_r', 'ct_heats': 'hm_c', \
           't_regrs': 'reg_t', 'l_regrs': 'reg_l', \
           'b_regrs': 'reg_b', 'r_regrs': 'reg_r'}
out = {}
for k in state_dict.keys():
  changed = False
  for m in key_map.keys():
    if m in k:
      if 'ct_heats' in k and m == 't_heats':
        continue
      new_k = k.replace(m, key_map[m])
      out[new_k] = state_dict[k]
      changed = True
      print('replace {} to {}'.format(k, new_k))
  if not changed:
    out[k] = state_dict[k]
data = {'epoch': 0, 'state_dict': out}
torch.save(data, OUT_PATH)
905
28.225806
69
py
SyNet
SyNet-master/CenterNet/src/tools/voc_eval_lib/datasets/ds_utils.py
# Fast/er R-CNN
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np


def unique_boxes(boxes, scale=1.0):
  """Return indices of unique boxes."""
  v = np.array([1, 1e3, 1e6, 1e9])
  hashes = np.round(boxes * scale).dot(v)
  _, index = np.unique(hashes, return_index=True)
  return np.sort(index)


def xywh_to_xyxy(boxes):
  """Convert [x y w h] box format to [x1 y1 x2 y2] format."""
  return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1))


def xyxy_to_xywh(boxes):
  """Convert [x1 y1 x2 y2] box format to [x y w h] format."""
  return np.hstack((boxes[:, 0:2], boxes[:, 2:4] - boxes[:, 0:2] + 1))


def validate_boxes(boxes, width=0, height=0):
  """Check that a set of boxes are valid."""
  x1 = boxes[:, 0]
  y1 = boxes[:, 1]
  x2 = boxes[:, 2]
  y2 = boxes[:, 3]
  assert (x1 >= 0).all()
  assert (y1 >= 0).all()
  assert (x2 >= x1).all()
  assert (y2 >= y1).all()
  assert (x2 < width).all()
  assert (y2 < height).all()


def filter_small_boxes(boxes, min_size):
  w = boxes[:, 2] - boxes[:, 0]
  h = boxes[:, 3] - boxes[:, 1]
  keep = np.where((w >= min_size) & (h > min_size))[0]
  return keep
1,402
27.06
70
py
SyNet
SyNet-master/CenterNet/src/lib/opts.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import sys class opts(object): def __init__(self): self.parser = argparse.ArgumentParser() # basic experiment setting self.parser.add_argument('task', default='ctdet', help='ctdet | ddd | multi_pose | exdet') self.parser.add_argument('--dataset', default='visdrone', help='coco | kitti | coco_hp | pascal') self.parser.add_argument('--exp_id', default='default') self.parser.add_argument('--test', action='store_true') self.parser.add_argument('--debug', type=int, default=0, help='level of visualization.' '1: only show the final detection results' '2: show the network output features' '3: use matplot to display' # useful when lunching training with ipython notebook '4: save all visualizations to disk') self.parser.add_argument('--demo', default='', help='path to image/ image folders/ video. ' 'or "webcam"') self.parser.add_argument('--load_model', default='', help='path to pretrained model') self.parser.add_argument('--resume', action='store_true', help='resume an experiment. ' 'Reloaded the optimizer parameter and ' 'set load_model to model_last.pth ' 'in the exp dir if load_model is empty.') # system self.parser.add_argument('--gpus', default='0', help='-1 for CPU, use comma for multiple gpus') self.parser.add_argument('--num_workers', type=int, default=4, help='dataloader threads. 0 for single-thread.') self.parser.add_argument('--not_cuda_benchmark', action='store_true', help='disable when the input size is not fixed.') self.parser.add_argument('--seed', type=int, default=317, help='random seed') # from CornerNet # log self.parser.add_argument('--print_iter', type=int, default=0, help='disable progress bar and print to screen.') self.parser.add_argument('--hide_data_time', action='store_true', help='not display time during training.') self.parser.add_argument('--save_all', action='store_true', help='save model to disk every 5 epochs.') self.parser.add_argument('--metric', default='loss', help='main metric to save best model') self.parser.add_argument('--vis_thresh', type=float, default=0.3, help='visualization threshold.') self.parser.add_argument('--debugger_theme', default='white', choices=['white', 'black']) # model self.parser.add_argument('--arch', default='dla_34', help='model architecture. Currently tested' 'res_18 | res_101 | resdcn_18 | resdcn_101 |' 'dlav0_34 | dla_34 | hourglass') self.parser.add_argument('--head_conv', type=int, default=-1, help='conv layer channels for output head' '0 for no conv layer' '-1 for default setting: ' '64 for resnets and 256 for dla.') self.parser.add_argument('--down_ratio', type=int, default=4, help='output stride. Currently only supports 4.') # input self.parser.add_argument('--input_res', type=int, default=-1, help='input height and width. -1 for default from ' 'dataset. Will be overriden by input_h | input_w') self.parser.add_argument('--input_h', type=int, default=-1, help='input height. -1 for default from dataset.') self.parser.add_argument('--input_w', type=int, default=-1, help='input width. 
-1 for default from dataset.') # train self.parser.add_argument('--lr', type=float, default=1.25e-4, help='learning rate for batch size 32.') self.parser.add_argument('--lr_step', type=str, default='90,120', help='drop learning rate by 10.') self.parser.add_argument('--num_epochs', type=int, default=140, help='total training epochs.') self.parser.add_argument('--batch_size', type=int, default=32, help='batch size') self.parser.add_argument('--master_batch_size', type=int, default=-1, help='batch size on the master gpu.') self.parser.add_argument('--num_iters', type=int, default=-1, help='default: #samples / batch_size.') self.parser.add_argument('--val_intervals', type=int, default=5, help='number of epochs to run validation.') self.parser.add_argument('--trainval', action='store_true', help='include validation in training and ' 'test on test set') # test self.parser.add_argument('--flip_test', action='store_true', help='flip data augmentation.') self.parser.add_argument('--test_scales', type=str, default='1', help='multi scale test augmentation.') self.parser.add_argument('--nms', action='store_true', help='run nms in testing.') self.parser.add_argument('--K', type=int, default=100, help='max number of output objects.') self.parser.add_argument('--not_prefetch_test', action='store_true', help='not use parallal data pre-processing.') self.parser.add_argument('--fix_res', action='store_true', help='fix testing resolution or keep ' 'the original resolution') self.parser.add_argument('--keep_res', action='store_true', help='keep the original resolution' ' during validation.') # dataset self.parser.add_argument('--not_rand_crop', action='store_true', help='not use the random crop data augmentation' 'from CornerNet.') self.parser.add_argument('--shift', type=float, default=0.1, help='when not using random crop' 'apply shift augmentation.') self.parser.add_argument('--scale', type=float, default=0.4, help='when not using random crop' 'apply scale augmentation.') self.parser.add_argument('--rotate', type=float, default=0, help='when not using random crop' 'apply rotation augmentation.') self.parser.add_argument('--flip', type = float, default=0.5, help='probability of applying flip augmentation.') self.parser.add_argument('--no_color_aug', action='store_true', help='not use the color augmenation ' 'from CornerNet') # multi_pose self.parser.add_argument('--aug_rot', type=float, default=0, help='probability of applying ' 'rotation augmentation.') # ddd self.parser.add_argument('--aug_ddd', type=float, default=0.5, help='probability of applying crop augmentation.') self.parser.add_argument('--rect_mask', action='store_true', help='for ignored object, apply mask on the ' 'rectangular region or just center point.') self.parser.add_argument('--kitti_split', default='3dop', help='different validation split for kitti: ' '3dop | subcnn') # loss self.parser.add_argument('--mse_loss', action='store_true', help='use mse loss or focal loss to train ' 'keypoint heatmaps.') # ctdet self.parser.add_argument('--reg_loss', default='l1', help='regression loss: sl1 | l1 | l2') self.parser.add_argument('--hm_weight', type=float, default=1, help='loss weight for keypoint heatmaps.') self.parser.add_argument('--off_weight', type=float, default=1, help='loss weight for keypoint local offsets.') self.parser.add_argument('--wh_weight', type=float, default=0.1, help='loss weight for bounding box size.') # multi_pose self.parser.add_argument('--hp_weight', type=float, default=1, help='loss weight for human pose offset.') 
self.parser.add_argument('--hm_hp_weight', type=float, default=1, help='loss weight for human keypoint heatmap.') # ddd self.parser.add_argument('--dep_weight', type=float, default=1, help='loss weight for depth.') self.parser.add_argument('--dim_weight', type=float, default=1, help='loss weight for 3d bounding box size.') self.parser.add_argument('--rot_weight', type=float, default=1, help='loss weight for orientation.') self.parser.add_argument('--peak_thresh', type=float, default=0.2) # task # ctdet self.parser.add_argument('--norm_wh', action='store_true', help='L1(\hat(y) / y, 1) or L1(\hat(y), y)') self.parser.add_argument('--dense_wh', action='store_true', help='apply weighted regression near center or ' 'just apply regression on center point.') self.parser.add_argument('--cat_spec_wh', action='store_true', help='category specific bounding box size.') self.parser.add_argument('--not_reg_offset', action='store_true', help='not regress local offset.') # exdet self.parser.add_argument('--agnostic_ex', action='store_true', help='use category agnostic extreme points.') self.parser.add_argument('--scores_thresh', type=float, default=0.1, help='threshold for extreme point heatmap.') self.parser.add_argument('--center_thresh', type=float, default=0.1, help='threshold for centermap.') self.parser.add_argument('--aggr_weight', type=float, default=0.0, help='edge aggregation weight.') # multi_pose self.parser.add_argument('--dense_hp', action='store_true', help='apply weighted pose regression near center ' 'or just apply regression on center point.') self.parser.add_argument('--not_hm_hp', action='store_true', help='not estimate human joint heatmap, ' 'directly use the joint offset from center.') self.parser.add_argument('--not_reg_hp_offset', action='store_true', help='not regress local offset for ' 'human joint heatmaps.') self.parser.add_argument('--not_reg_bbox', action='store_true', help='not regression bounding box size.') # ground truth validation self.parser.add_argument('--eval_oracle_hm', action='store_true', help='use ground center heatmap.') self.parser.add_argument('--eval_oracle_wh', action='store_true', help='use ground truth bounding box size.') self.parser.add_argument('--eval_oracle_offset', action='store_true', help='use ground truth local heatmap offset.') self.parser.add_argument('--eval_oracle_kps', action='store_true', help='use ground truth human pose offset.') self.parser.add_argument('--eval_oracle_hmhp', action='store_true', help='use ground truth human joint heatmaps.') self.parser.add_argument('--eval_oracle_hp_offset', action='store_true', help='use ground truth human joint local offset.') self.parser.add_argument('--eval_oracle_dep', action='store_true', help='use ground truth depth.') def parse(self, args=''): if args == '': opt = self.parser.parse_args() else: opt = self.parser.parse_args(args) opt.gpus_str = opt.gpus opt.gpus = [int(gpu) for gpu in opt.gpus.split(',')] opt.gpus = [i for i in range(len(opt.gpus))] if opt.gpus[0] >=0 else [-1] opt.lr_step = [int(i) for i in opt.lr_step.split(',')] opt.test_scales = [float(i) for i in opt.test_scales.split(',')] opt.fix_res = not opt.keep_res print('Fix size testing.' 
if opt.fix_res else 'Keep resolution testing.') opt.reg_offset = not opt.not_reg_offset opt.reg_bbox = not opt.not_reg_bbox opt.hm_hp = not opt.not_hm_hp opt.reg_hp_offset = (not opt.not_reg_hp_offset) and opt.hm_hp if opt.head_conv == -1: # init default head_conv opt.head_conv = 256 if 'dla' in opt.arch else 64 opt.pad = 127 if 'hourglass' in opt.arch else 31 opt.num_stacks = 2 if opt.arch == 'hourglass' else 1 if opt.trainval: opt.val_intervals = 100000000 if opt.debug > 0: opt.num_workers = 0 opt.batch_size = 1 opt.gpus = [opt.gpus[0]] opt.master_batch_size = -1 if opt.master_batch_size == -1: opt.master_batch_size = opt.batch_size // len(opt.gpus) rest_batch_size = (opt.batch_size - opt.master_batch_size) opt.chunk_sizes = [opt.master_batch_size] for i in range(len(opt.gpus) - 1): slave_chunk_size = rest_batch_size // (len(opt.gpus) - 1) if i < rest_batch_size % (len(opt.gpus) - 1): slave_chunk_size += 1 opt.chunk_sizes.append(slave_chunk_size) print('training chunk_sizes:', opt.chunk_sizes) opt.root_dir = os.path.join(os.path.dirname(__file__), '..', '..') opt.data_dir = os.path.join(opt.root_dir, 'data') opt.exp_dir = os.path.join(opt.root_dir, 'exp', opt.task) opt.save_dir = os.path.join(opt.exp_dir, opt.exp_id) opt.debug_dir = os.path.join(opt.save_dir, 'debug') print('The output will be saved to ', opt.save_dir) if opt.resume and opt.load_model == '': model_path = opt.save_dir[:-4] if opt.save_dir.endswith('TEST') \ else opt.save_dir opt.load_model = os.path.join(model_path, 'model_last.pth') return opt def update_dataset_info_and_set_heads(self, opt, dataset): input_h, input_w = dataset.default_resolution opt.mean, opt.std = dataset.mean, dataset.std opt.num_classes = dataset.num_classes # input_h(w): opt.input_h overrides opt.input_res overrides dataset default input_h = opt.input_res if opt.input_res > 0 else input_h input_w = opt.input_res if opt.input_res > 0 else input_w opt.input_h = opt.input_h if opt.input_h > 0 else input_h opt.input_w = opt.input_w if opt.input_w > 0 else input_w opt.output_h = opt.input_h // opt.down_ratio opt.output_w = opt.input_w // opt.down_ratio opt.input_res = max(opt.input_h, opt.input_w) opt.output_res = max(opt.output_h, opt.output_w) if opt.task == 'exdet': # assert opt.dataset in ['coco'] num_hm = 1 if opt.agnostic_ex else opt.num_classes opt.heads = {'hm_t': num_hm, 'hm_l': num_hm, 'hm_b': num_hm, 'hm_r': num_hm, 'hm_c': opt.num_classes} if opt.reg_offset: opt.heads.update({'reg_t': 2, 'reg_l': 2, 'reg_b': 2, 'reg_r': 2}) elif opt.task == 'ddd': # assert opt.dataset in ['gta', 'kitti', 'viper'] opt.heads = {'hm': opt.num_classes, 'dep': 1, 'rot': 8, 'dim': 3} if opt.reg_bbox: opt.heads.update( {'wh': 2}) if opt.reg_offset: opt.heads.update({'reg': 2}) elif opt.task == 'ctdet': # assert opt.dataset in ['pascal', 'coco'] opt.heads = {'hm': opt.num_classes, 'wh': 2 if not opt.cat_spec_wh else 2 * opt.num_classes} if opt.reg_offset: opt.heads.update({'reg': 2}) elif opt.task == 'multi_pose': # assert opt.dataset in ['coco_hp'] opt.flip_idx = dataset.flip_idx opt.heads = {'hm': opt.num_classes, 'wh': 2, 'hps': 34} if opt.reg_offset: opt.heads.update({'reg': 2}) if opt.hm_hp: opt.heads.update({'hm_hp': 17}) if opt.reg_hp_offset: opt.heads.update({'hp_offset': 2}) else: assert 0, 'task not defined!' 
print('heads', opt.heads) return opt def init(self, args=''): default_dataset_info = { 'ctdet': {'default_resolution': [512, 512], 'num_classes': 10, 'mean': [0.408, 0.447, 0.470], 'std': [0.289, 0.274, 0.278], 'dataset': 'visdrone'}, 'exdet': {'default_resolution': [512, 512], 'num_classes': 80, 'mean': [0.408, 0.447, 0.470], 'std': [0.289, 0.274, 0.278], 'dataset': 'coco'}, 'multi_pose': { 'default_resolution': [512, 512], 'num_classes': 1, 'mean': [0.408, 0.447, 0.470], 'std': [0.289, 0.274, 0.278], 'dataset': 'coco_hp', 'num_joints': 17, 'flip_idx': [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]}, 'ddd': {'default_resolution': [384, 1280], 'num_classes': 3, 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'dataset': 'kitti'}, } class Struct: def __init__(self, entries): for k, v in entries.items(): self.__setattr__(k, v) opt = self.parse(args) dataset = Struct(default_dataset_info[opt.task]) opt.dataset = dataset.dataset opt = self.update_dataset_info_and_set_heads(opt, dataset) return opt
18,703
50.526171
115
py
SyNet
SyNet-master/CenterNet/src/lib/logger.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import time
import sys
import torch

USE_TENSORBOARD = True
try:
  import tensorboardX
  print('Using tensorboardX')
except:
  USE_TENSORBOARD = False


class Logger(object):
  def __init__(self, opt):
    """Create a summary writer logging to log_dir."""
    if not os.path.exists(opt.save_dir):
      os.makedirs(opt.save_dir)
    if not os.path.exists(opt.debug_dir):
      os.makedirs(opt.debug_dir)

    time_str = time.strftime('%Y-%m-%d-%H-%M')

    args = dict((name, getattr(opt, name)) for name in dir(opt)
                if not name.startswith('_'))
    file_name = os.path.join(opt.save_dir, 'opt.txt')
    with open(file_name, 'wt') as opt_file:
      opt_file.write('==> torch version: {}\n'.format(torch.__version__))
      opt_file.write('==> cudnn version: {}\n'.format(
        torch.backends.cudnn.version()))
      opt_file.write('==> Cmd:\n')
      opt_file.write(str(sys.argv))
      opt_file.write('\n==> Opt:\n')
      for k, v in sorted(args.items()):
        opt_file.write('  %s: %s\n' % (str(k), str(v)))

    log_dir = opt.save_dir + '/logs_{}'.format(time_str)
    if USE_TENSORBOARD:
      self.writer = tensorboardX.SummaryWriter(log_dir=log_dir)
    else:
      if not os.path.exists(os.path.dirname(log_dir)):
        os.mkdir(os.path.dirname(log_dir))
      if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    self.log = open(log_dir + '/log.txt', 'w')
    try:
      os.system('cp {}/opt.txt {}/'.format(opt.save_dir, log_dir))
    except:
      pass
    self.start_line = True

  def write(self, txt):
    if self.start_line:
      time_str = time.strftime('%Y-%m-%d-%H-%M')
      self.log.write('{}: {}'.format(time_str, txt))
    else:
      self.log.write(txt)
    self.start_line = False
    if '\n' in txt:
      self.start_line = True
    self.log.flush()

  def close(self):
    self.log.close()

  def scalar_summary(self, tag, value, step):
    """Log a scalar variable."""
    if USE_TENSORBOARD:
      self.writer.add_scalar(tag, value, step)
2,228
29.534247
86
py
SyNet
SyNet-master/CenterNet/src/lib/external/setup.py
import numpy
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize

extensions = [
    Extension(
        "nms",
        ["nms.pyx"])
]

setup(
    name="coco",
    ext_modules=cythonize(extensions),
    include_dirs=[numpy.get_include()]
)
298
16.588235
41
py
SyNet
SyNet-master/CenterNet/src/lib/detectors/exdet.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import _init_paths import os import cv2 import numpy as np from progress.bar import Bar import time import torch from models.decode import exct_decode, agnex_ct_decode from models.utils import flip_tensor from utils.image import get_affine_transform, transform_preds from utils.post_process import ctdet_post_process from utils.debugger import Debugger from .base_detector import BaseDetector class ExdetDetector(BaseDetector): def __init__(self, opt): super(ExdetDetector, self).__init__(opt) self.decode = agnex_ct_decode if opt.agnostic_ex else exct_decode def process(self, images, return_time=False): with torch.no_grad(): torch.cuda.synchronize() output = self.model(images)[-1] t_heat = output['hm_t'].sigmoid_() l_heat = output['hm_l'].sigmoid_() b_heat = output['hm_b'].sigmoid_() r_heat = output['hm_r'].sigmoid_() c_heat = output['hm_c'].sigmoid_() torch.cuda.synchronize() forward_time = time.time() if self.opt.reg_offset: dets = self.decode(t_heat, l_heat, b_heat, r_heat, c_heat, output['reg_t'], output['reg_l'], output['reg_b'], output['reg_r'], K=self.opt.K, scores_thresh=self.opt.scores_thresh, center_thresh=self.opt.center_thresh, aggr_weight=self.opt.aggr_weight) else: dets = self.decode(t_heat, l_heat, b_heat, r_heat, c_heat, K=self.opt.K, scores_thresh=self.opt.scores_thresh, center_thresh=self.opt.center_thresh, aggr_weight=self.opt.aggr_weight) if return_time: return output, dets, forward_time else: return output, dets def debug(self, debugger, images, dets, output, scale=1): detection = dets.detach().cpu().numpy().copy() detection[:, :, :4] *= self.opt.down_ratio for i in range(1): inp_height, inp_width = images.shape[2], images.shape[3] pred_hm = np.zeros((inp_height, inp_width, 3), dtype=np.uint8) img = images[i].detach().cpu().numpy().transpose(1, 2, 0) img = ((img * self.std + self.mean) * 255).astype(np.uint8) parts = ['t', 'l', 'b', 'r', 'c'] for p in parts: tag = 'hm_{}'.format(p) pred = debugger.gen_colormap( output[tag][i].detach().cpu().numpy(), (inp_height, inp_width)) if p != 'c': pred_hm = np.maximum(pred_hm, pred) else: debugger.add_blend_img( img, pred, 'pred_{}_{:.1f}'.format(p, scale)) debugger.add_blend_img(img, pred_hm, 'pred_{:.1f}'.format(scale)) debugger.add_img(img, img_id='out_{:.1f}'.format(scale)) for k in range(len(detection[i])): # print('detection', detection[i, k, 4], detection[i, k]) if detection[i, k, 4] > 0.01: # print('detection', detection[i, k, 4], detection[i, k]) debugger.add_coco_bbox(detection[i, k, :4], detection[i, k, -1], detection[i, k, 4], img_id='out_{:.1f}'.format(scale)) def post_process(self, dets, meta, scale=1): out_width, out_height = meta['out_width'], meta['out_height'] dets = dets.detach().cpu().numpy().reshape(2, -1, 14) dets[1, :, [0, 2]] = out_width - dets[1, :, [2, 0]] dets = dets.reshape(1, -1, 14) dets[0, :, 0:2] = transform_preds( dets[0, :, 0:2], meta['c'], meta['s'], (out_width, out_height)) dets[0, :, 2:4] = transform_preds( dets[0, :, 2:4], meta['c'], meta['s'], (out_width, out_height)) dets[:, :, 0:4] /= scale return dets[0] def merge_outputs(self, detections): detections = np.concatenate( [detection for detection in detections], axis=0).astype(np.float32) classes = detections[..., -1] keep_inds = (detections[:, 4] > 0) detections = detections[keep_inds] classes = classes[keep_inds] results = {} for j in range(self.num_classes): keep_inds = (classes == j) results[j + 1] = detections[keep_inds][:, 
0:7].astype(np.float32) soft_nms(results[j + 1], Nt=0.5, method=2) results[j + 1] = results[j + 1][:, 0:5] scores = np.hstack([ results[j][:, -1] for j in range(1, self.num_classes + 1) ]) if len(scores) > self.max_per_image: kth = len(scores) - self.max_per_image thresh = np.partition(scores, kth)[kth] for j in range(1, self.num_classes + 1): keep_inds = (results[j][:, -1] >= thresh) results[j] = results[j][keep_inds] return results def show_results(self, debugger, image, results): debugger.add_img(image, img_id='exdet') for j in range(1, self.num_classes + 1): for bbox in results[j]: if bbox[4] > self.opt.vis_thresh: debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='exdet') debugger.show_all_imgs(pause=self.pause)
5,063
37.363636
80
py
SyNet
SyNet-master/CenterNet/src/lib/detectors/ctdet.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import cv2 import numpy as np from progress.bar import Bar import time import torch try: from external.nms import soft_nms except: print('NMS not imported! If you need it,' ' do \n cd $CenterNet_ROOT/src/lib/external \n make') from models.decode import ctdet_decode from models.utils import flip_tensor from utils.image import get_affine_transform from utils.post_process import ctdet_post_process from utils.debugger import Debugger from .base_detector import BaseDetector class CtdetDetector(BaseDetector): def __init__(self, opt): super(CtdetDetector, self).__init__(opt) def process(self, images, return_time=False): with torch.no_grad(): output = self.model(images)[-1] hm = output['hm'].sigmoid_() wh = output['wh'] reg = output['reg'] if self.opt.reg_offset else None if self.opt.flip_test: hm = (hm[0:1] + flip_tensor(hm[1:2])) / 2 wh = (wh[0:1] + flip_tensor(wh[1:2])) / 2 reg = reg[0:1] if reg is not None else None torch.cuda.synchronize() forward_time = time.time() dets = ctdet_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K) if return_time: return output, dets, forward_time else: return output, dets def post_process(self, dets, meta, scale=1): dets = dets.detach().cpu().numpy() dets = dets.reshape(1, -1, dets.shape[2]) dets = ctdet_post_process( dets.copy(), [meta['c']], [meta['s']], meta['out_height'], meta['out_width'], self.opt.num_classes) for j in range(1, self.num_classes + 1): dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5) dets[0][j][:, :4] /= scale return dets[0] def merge_outputs(self, detections): results = {} for j in range(1, self.num_classes + 1): results[j] = np.concatenate( [detection[j] for detection in detections], axis=0).astype(np.float32) if len(self.scales) > 1 or self.opt.nms: soft_nms(results[j], Nt=0.5, method=2) scores = np.hstack( [results[j][:, 4] for j in range(1, self.num_classes + 1)]) if len(scores) > self.max_per_image: kth = len(scores) - self.max_per_image thresh = np.partition(scores, kth)[kth] for j in range(1, self.num_classes + 1): keep_inds = (results[j][:, 4] >= thresh) results[j] = results[j][keep_inds] return results def debug(self, debugger, images, dets, output, scale=1): detection = dets.detach().cpu().numpy().copy() detection[:, :, :4] *= self.opt.down_ratio for i in range(1): img = images[i].detach().cpu().numpy().transpose(1, 2, 0) img = ((img * self.std + self.mean) * 255).astype(np.uint8) pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy()) debugger.add_blend_img(img, pred, 'pred_hm_{:.1f}'.format(scale)) debugger.add_img(img, img_id='out_pred_{:.1f}'.format(scale)) for k in range(len(dets[i])): if detection[i, k, 4] > self.opt.center_thresh: debugger.add_coco_bbox(detection[i, k, :4], detection[i, k, -1], detection[i, k, 4], img_id='out_pred_{:.1f}'.format(scale)) def show_results(self, debugger, image, results): debugger.add_img(image, img_id='ctdet') for j in range(1, self.num_classes + 1): for bbox in results[j]: if bbox[4] > self.opt.vis_thresh: debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='ctdet') debugger.show_all_imgs(pause=self.pause)
3,674
36.886598
90
py
SyNet
SyNet-master/CenterNet/src/lib/detectors/ddd.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import cv2 import numpy as np from progress.bar import Bar import time import torch from models.decode import ddd_decode from models.utils import flip_tensor from utils.image import get_affine_transform from utils.post_process import ddd_post_process from utils.debugger import Debugger from utils.ddd_utils import compute_box_3d, project_to_image, alpha2rot_y from utils.ddd_utils import draw_box_3d, unproject_2d_to_3d from .base_detector import BaseDetector class DddDetector(BaseDetector): def __init__(self, opt): super(DddDetector, self).__init__(opt) self.calib = np.array([[707.0493, 0, 604.0814, 45.75831], [0, 707.0493, 180.5066, -0.3454157], [0, 0, 1., 0.004981016]], dtype=np.float32) def pre_process(self, image, scale, calib=None): height, width = image.shape[0:2] inp_height, inp_width = self.opt.input_h, self.opt.input_w c = np.array([width / 2, height / 2], dtype=np.float32) if self.opt.keep_res: s = np.array([inp_width, inp_height], dtype=np.int32) else: s = np.array([width, height], dtype=np.int32) trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height]) resized_image = image #cv2.resize(image, (width, height)) inp_image = cv2.warpAffine( resized_image, trans_input, (inp_width, inp_height), flags=cv2.INTER_LINEAR) inp_image = (inp_image.astype(np.float32) / 255.) inp_image = (inp_image - self.mean) / self.std images = inp_image.transpose(2, 0, 1)[np.newaxis, ...] calib = np.array(calib, dtype=np.float32) if calib is not None \ else self.calib images = torch.from_numpy(images) meta = {'c': c, 's': s, 'out_height': inp_height // self.opt.down_ratio, 'out_width': inp_width // self.opt.down_ratio, 'calib': calib} return images, meta def process(self, images, return_time=False): with torch.no_grad(): torch.cuda.synchronize() output = self.model(images)[-1] output['hm'] = output['hm'].sigmoid_() output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1. wh = output['wh'] if self.opt.reg_bbox else None reg = output['reg'] if self.opt.reg_offset else None torch.cuda.synchronize() forward_time = time.time() dets = ddd_decode(output['hm'], output['rot'], output['dep'], output['dim'], wh=wh, reg=reg, K=self.opt.K) if return_time: return output, dets, forward_time else: return output, dets def post_process(self, dets, meta, scale=1): dets = dets.detach().cpu().numpy() detections = ddd_post_process( dets.copy(), [meta['c']], [meta['s']], [meta['calib']], self.opt) self.this_calib = meta['calib'] return detections[0] def merge_outputs(self, detections): results = detections[0] for j in range(1, self.num_classes + 1): if len(results[j] > 0): keep_inds = (results[j][:, -1] > self.opt.peak_thresh) results[j] = results[j][keep_inds] return results def debug(self, debugger, images, dets, output, scale=1): dets = dets.detach().cpu().numpy() img = images[0].detach().cpu().numpy().transpose(1, 2, 0) img = ((img * self.std + self.mean) * 255).astype(np.uint8) pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy()) debugger.add_blend_img(img, pred, 'pred_hm') debugger.add_ct_detection( img, dets[0], show_box=self.opt.reg_bbox, center_thresh=self.opt.vis_thresh, img_id='det_pred') def show_results(self, debugger, image, results): debugger.add_3d_detection( image, results, self.this_calib, center_thresh=self.opt.vis_thresh, img_id='add_pred') debugger.add_bird_view( results, center_thresh=self.opt.vis_thresh, img_id='bird_pred') debugger.show_all_imgs(pause=self.pause)
4,013
36.867925
73
py
SyNet
SyNet-master/CenterNet/src/lib/detectors/multi_pose.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import cv2 import numpy as np from progress.bar import Bar import time import torch try: from external.nms import soft_nms_39 except: print('NMS not imported! If you need it,' ' do \n cd $CenterNet_ROOT/src/lib/external \n make') from models.decode import multi_pose_decode from models.utils import flip_tensor, flip_lr_off, flip_lr from utils.image import get_affine_transform from utils.post_process import multi_pose_post_process from utils.debugger import Debugger from .base_detector import BaseDetector class MultiPoseDetector(BaseDetector): def __init__(self, opt): super(MultiPoseDetector, self).__init__(opt) self.flip_idx = opt.flip_idx def process(self, images, return_time=False): with torch.no_grad(): torch.cuda.synchronize() output = self.model(images)[-1] output['hm'] = output['hm'].sigmoid_() if self.opt.hm_hp and not self.opt.mse_loss: output['hm_hp'] = output['hm_hp'].sigmoid_() reg = output['reg'] if self.opt.reg_offset else None hm_hp = output['hm_hp'] if self.opt.hm_hp else None hp_offset = output['hp_offset'] if self.opt.reg_hp_offset else None torch.cuda.synchronize() forward_time = time.time() if self.opt.flip_test: output['hm'] = (output['hm'][0:1] + flip_tensor(output['hm'][1:2])) / 2 output['wh'] = (output['wh'][0:1] + flip_tensor(output['wh'][1:2])) / 2 output['hps'] = (output['hps'][0:1] + flip_lr_off(output['hps'][1:2], self.flip_idx)) / 2 hm_hp = (hm_hp[0:1] + flip_lr(hm_hp[1:2], self.flip_idx)) / 2 \ if hm_hp is not None else None reg = reg[0:1] if reg is not None else None hp_offset = hp_offset[0:1] if hp_offset is not None else None dets = multi_pose_decode( output['hm'], output['wh'], output['hps'], reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=self.opt.K) if return_time: return output, dets, forward_time else: return output, dets def post_process(self, dets, meta, scale=1): dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2]) dets = multi_pose_post_process( dets.copy(), [meta['c']], [meta['s']], meta['out_height'], meta['out_width']) for j in range(1, self.num_classes + 1): dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 39) # import pdb; pdb.set_trace() dets[0][j][:, :4] /= scale dets[0][j][:, 5:] /= scale return dets[0] def merge_outputs(self, detections): results = {} results[1] = np.concatenate( [detection[1] for detection in detections], axis=0).astype(np.float32) if self.opt.nms or len(self.opt.test_scales) > 1: soft_nms_39(results[1], Nt=0.5, method=2) results[1] = results[1].tolist() return results def debug(self, debugger, images, dets, output, scale=1): dets = dets.detach().cpu().numpy().copy() dets[:, :, :4] *= self.opt.down_ratio dets[:, :, 5:39] *= self.opt.down_ratio img = images[0].detach().cpu().numpy().transpose(1, 2, 0) img = np.clip((( img * self.std + self.mean) * 255.), 0, 255).astype(np.uint8) pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy()) debugger.add_blend_img(img, pred, 'pred_hm') if self.opt.hm_hp: pred = debugger.gen_colormap_hp( output['hm_hp'][0].detach().cpu().numpy()) debugger.add_blend_img(img, pred, 'pred_hmhp') def show_results(self, debugger, image, results): debugger.add_img(image, img_id='multi_pose') for bbox in results[1]: if bbox[4] > self.opt.vis_thresh: debugger.add_coco_bbox(bbox[:4], 0, bbox[4], img_id='multi_pose') debugger.add_coco_hp(bbox[5:39], img_id='multi_pose') debugger.show_all_imgs(pause=self.pause)
3,923
37.097087
79
py
SyNet
SyNet-master/CenterNet/src/lib/detectors/detector_factory.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from .exdet import ExdetDetector
from .ddd import DddDetector
from .ctdet import CtdetDetector
from .multi_pose import MultiPoseDetector

detector_factory = {
  'exdet': ExdetDetector,
  'ddd': DddDetector,
  'ctdet': CtdetDetector,
  'multi_pose': MultiPoseDetector,
}
382
22.9375
41
py
SyNet
SyNet-master/CenterNet/src/lib/detectors/base_detector.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import cv2 import numpy as np from progress.bar import Bar import time import torch from models.model import create_model, load_model from utils.image import get_affine_transform from utils.debugger import Debugger class BaseDetector(object): def __init__(self, opt): if opt.gpus[0] >= 0: opt.device = torch.device('cuda') else: opt.device = torch.device('cpu') print('Creating model...') self.model = create_model(opt.arch, opt.heads, opt.head_conv) self.model = load_model(self.model, opt.load_model) self.model = self.model.to(opt.device) self.model.eval() self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3) self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3) self.max_per_image = 100 self.num_classes = opt.num_classes self.scales = opt.test_scales self.opt = opt self.pause = True def pre_process(self, image, scale, meta=None): height, width = image.shape[0:2] new_height = int(height * scale) new_width = int(width * scale) if self.opt.fix_res: inp_height, inp_width = self.opt.input_h, self.opt.input_w c = np.array([new_width / 2., new_height / 2.], dtype=np.float32) s = max(height, width) * 1.0 else: inp_height = (new_height | self.opt.pad) + 1 inp_width = (new_width | self.opt.pad) + 1 c = np.array([new_width // 2, new_height // 2], dtype=np.float32) s = np.array([inp_width, inp_height], dtype=np.float32) trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height]) resized_image = cv2.resize(image, (new_width, new_height)) inp_image = cv2.warpAffine( resized_image, trans_input, (inp_width, inp_height), flags=cv2.INTER_LINEAR) inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32) images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width) if self.opt.flip_test: images = np.concatenate((images, images[:, :, :, ::-1]), axis=0) images = torch.from_numpy(images) meta = {'c': c, 's': s, 'out_height': inp_height // self.opt.down_ratio, 'out_width': inp_width // self.opt.down_ratio} return images, meta def process(self, images, return_time=False): raise NotImplementedError def post_process(self, dets, meta, scale=1): raise NotImplementedError def merge_outputs(self, detections): raise NotImplementedError def debug(self, debugger, images, dets, output, scale=1): raise NotImplementedError def show_results(self, debugger, image, results): raise NotImplementedError def run(self, image_or_path_or_tensor, meta=None): load_time, pre_time, net_time, dec_time, post_time = 0, 0, 0, 0, 0 merge_time, tot_time = 0, 0 debugger = Debugger(dataset=self.opt.dataset, ipynb=(self.opt.debug==3), theme=self.opt.debugger_theme) start_time = time.time() pre_processed = False if isinstance(image_or_path_or_tensor, np.ndarray): image = image_or_path_or_tensor elif type(image_or_path_or_tensor) == type (''): image = cv2.imread(image_or_path_or_tensor) else: image = image_or_path_or_tensor['image'][0].numpy() pre_processed_images = image_or_path_or_tensor pre_processed = True loaded_time = time.time() load_time += (loaded_time - start_time) detections = [] for scale in self.scales: scale_start_time = time.time() if not pre_processed: images, meta = self.pre_process(image, scale, meta) else: # import pdb; pdb.set_trace() images = pre_processed_images['images'][scale][0] meta = pre_processed_images['meta'][scale] meta = {k: v.numpy()[0] for k, v in meta.items()} images = images.to(self.opt.device) torch.cuda.synchronize() pre_process_time = time.time() 
pre_time += pre_process_time - scale_start_time output, dets, forward_time = self.process(images, return_time=True) torch.cuda.synchronize() net_time += forward_time - pre_process_time decode_time = time.time() dec_time += decode_time - forward_time if self.opt.debug >= 2: self.debug(debugger, images, dets, output, scale) dets = self.post_process(dets, meta, scale) torch.cuda.synchronize() post_process_time = time.time() post_time += post_process_time - decode_time detections.append(dets) results = self.merge_outputs(detections) torch.cuda.synchronize() end_time = time.time() merge_time += end_time - post_process_time tot_time += end_time - start_time if self.opt.debug >= 1: self.show_results(debugger, image, results) return {'results': results, 'tot': tot_time, 'load': load_time, 'pre': pre_time, 'net': net_time, 'dec': dec_time, 'post': post_time, 'merge': merge_time}
5,061
34.152778
78
py
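A minimal sketch (plain Python, no repo dependencies) of the padding rule used in pre_process above when opt.fix_res is False: with opt.pad = 127, the expression (size | pad) + 1 rounds the scaled size up to a multiple of 128 so the downsampled feature map stays aligned with the network stride. padded_size is a hypothetical helper named here only for illustration.

def padded_size(size, pad=127):
    # OR-ing in the low bits and adding 1 carries into the next multiple of (pad + 1)
    return (size | pad) + 1

for s in (255, 256, 500, 512):
    print(s, '->', padded_size(s))  # 255 -> 256, 256 -> 384, 500 -> 512, 512 -> 640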
SyNet
SyNet-master/CenterNet/src/lib/models/decode.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch import torch.nn as nn from .utils import _gather_feat, _transpose_and_gather_feat def _nms(heat, kernel=3): pad = (kernel - 1) // 2 hmax = nn.functional.max_pool2d( heat, (kernel, kernel), stride=1, padding=pad) keep = (hmax == heat).float() return heat * keep def _left_aggregate(heat): ''' heat: batchsize x channels x h x w ''' shape = heat.shape heat = heat.reshape(-1, heat.shape[3]) heat = heat.transpose(1, 0).contiguous() ret = heat.clone() for i in range(1, heat.shape[0]): inds = (heat[i] >= heat[i - 1]) ret[i] += ret[i - 1] * inds.float() return (ret - heat).transpose(1, 0).reshape(shape) def _right_aggregate(heat): ''' heat: batchsize x channels x h x w ''' shape = heat.shape heat = heat.reshape(-1, heat.shape[3]) heat = heat.transpose(1, 0).contiguous() ret = heat.clone() for i in range(heat.shape[0] - 2, -1, -1): inds = (heat[i] >= heat[i +1]) ret[i] += ret[i + 1] * inds.float() return (ret - heat).transpose(1, 0).reshape(shape) def _top_aggregate(heat): ''' heat: batchsize x channels x h x w ''' heat = heat.transpose(3, 2) shape = heat.shape heat = heat.reshape(-1, heat.shape[3]) heat = heat.transpose(1, 0).contiguous() ret = heat.clone() for i in range(1, heat.shape[0]): inds = (heat[i] >= heat[i - 1]) ret[i] += ret[i - 1] * inds.float() return (ret - heat).transpose(1, 0).reshape(shape).transpose(3, 2) def _bottom_aggregate(heat): ''' heat: batchsize x channels x h x w ''' heat = heat.transpose(3, 2) shape = heat.shape heat = heat.reshape(-1, heat.shape[3]) heat = heat.transpose(1, 0).contiguous() ret = heat.clone() for i in range(heat.shape[0] - 2, -1, -1): inds = (heat[i] >= heat[i + 1]) ret[i] += ret[i + 1] * inds.float() return (ret - heat).transpose(1, 0).reshape(shape).transpose(3, 2) def _h_aggregate(heat, aggr_weight=0.1): return aggr_weight * _left_aggregate(heat) + \ aggr_weight * _right_aggregate(heat) + heat def _v_aggregate(heat, aggr_weight=0.1): return aggr_weight * _top_aggregate(heat) + \ aggr_weight * _bottom_aggregate(heat) + heat ''' # Slow for large number of categories def _topk(scores, K=40): batch, cat, height, width = scores.size() topk_scores, topk_inds = torch.topk(scores.view(batch, -1), K) topk_clses = (topk_inds / (height * width)).int() topk_inds = topk_inds % (height * width) topk_ys = (topk_inds / width).int().float() topk_xs = (topk_inds % width).int().float() return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs ''' def _topk_channel(scores, K=40): batch, cat, height, width = scores.size() topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K) topk_inds = topk_inds % (height * width) topk_ys = (topk_inds / width).int().float() topk_xs = (topk_inds % width).int().float() return topk_scores, topk_inds, topk_ys, topk_xs def _topk(scores, K=40): batch, cat, height, width = scores.size() topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K) topk_inds = topk_inds % (height * width) topk_ys = (topk_inds / width).int().float() topk_xs = (topk_inds % width).int().float() topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K) topk_clses = (topk_ind / K).int() topk_inds = _gather_feat( topk_inds.view(batch, -1, 1), topk_ind).view(batch, K) topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K) topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K) return topk_score, topk_inds, topk_clses, topk_ys, topk_xs def agnex_ct_decode( t_heat, l_heat, b_heat, 
r_heat, ct_heat, t_regr=None, l_regr=None, b_regr=None, r_regr=None, K=40, scores_thresh=0.1, center_thresh=0.1, aggr_weight=0.0, num_dets=1000 ): batch, cat, height, width = t_heat.size() ''' t_heat = torch.sigmoid(t_heat) l_heat = torch.sigmoid(l_heat) b_heat = torch.sigmoid(b_heat) r_heat = torch.sigmoid(r_heat) ct_heat = torch.sigmoid(ct_heat) ''' if aggr_weight > 0: t_heat = _h_aggregate(t_heat, aggr_weight=aggr_weight) l_heat = _v_aggregate(l_heat, aggr_weight=aggr_weight) b_heat = _h_aggregate(b_heat, aggr_weight=aggr_weight) r_heat = _v_aggregate(r_heat, aggr_weight=aggr_weight) # perform nms on heatmaps t_heat = _nms(t_heat) l_heat = _nms(l_heat) b_heat = _nms(b_heat) r_heat = _nms(r_heat) t_heat[t_heat > 1] = 1 l_heat[l_heat > 1] = 1 b_heat[b_heat > 1] = 1 r_heat[r_heat > 1] = 1 t_scores, t_inds, _, t_ys, t_xs = _topk(t_heat, K=K) l_scores, l_inds, _, l_ys, l_xs = _topk(l_heat, K=K) b_scores, b_inds, _, b_ys, b_xs = _topk(b_heat, K=K) r_scores, r_inds, _, r_ys, r_xs = _topk(r_heat, K=K) ct_heat_agn, ct_clses = torch.max(ct_heat, dim=1, keepdim=True) # import pdb; pdb.set_trace() t_ys = t_ys.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K) t_xs = t_xs.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K) l_ys = l_ys.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K) l_xs = l_xs.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K) b_ys = b_ys.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K) b_xs = b_xs.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K) r_ys = r_ys.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K) r_xs = r_xs.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K) box_ct_xs = ((l_xs + r_xs + 0.5) / 2).long() box_ct_ys = ((t_ys + b_ys + 0.5) / 2).long() ct_inds = box_ct_ys * width + box_ct_xs ct_inds = ct_inds.view(batch, -1) ct_heat_agn = ct_heat_agn.view(batch, -1, 1) ct_clses = ct_clses.view(batch, -1, 1) ct_scores = _gather_feat(ct_heat_agn, ct_inds) clses = _gather_feat(ct_clses, ct_inds) t_scores = t_scores.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K) l_scores = l_scores.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K) b_scores = b_scores.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K) r_scores = r_scores.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K) ct_scores = ct_scores.view(batch, K, K, K, K) scores = (t_scores + l_scores + b_scores + r_scores + 2 * ct_scores) / 6 # reject boxes based on classes top_inds = (t_ys > l_ys) + (t_ys > b_ys) + (t_ys > r_ys) top_inds = (top_inds > 0) left_inds = (l_xs > t_xs) + (l_xs > b_xs) + (l_xs > r_xs) left_inds = (left_inds > 0) bottom_inds = (b_ys < t_ys) + (b_ys < l_ys) + (b_ys < r_ys) bottom_inds = (bottom_inds > 0) right_inds = (r_xs < t_xs) + (r_xs < l_xs) + (r_xs < b_xs) right_inds = (right_inds > 0) sc_inds = (t_scores < scores_thresh) + (l_scores < scores_thresh) + \ (b_scores < scores_thresh) + (r_scores < scores_thresh) + \ (ct_scores < center_thresh) sc_inds = (sc_inds > 0) scores = scores - sc_inds.float() scores = scores - top_inds.float() scores = scores - left_inds.float() scores = scores - bottom_inds.float() scores = scores - right_inds.float() scores = scores.view(batch, -1) scores, inds = torch.topk(scores, num_dets) scores = scores.unsqueeze(2) if t_regr is not None and l_regr is not None \ and b_regr is not None and r_regr is not None: t_regr = _transpose_and_gather_feat(t_regr, t_inds) t_regr = t_regr.view(batch, K, 1, 1, 1, 2) l_regr = _transpose_and_gather_feat(l_regr, l_inds) l_regr = l_regr.view(batch, 1, K, 1, 1, 2) b_regr = _transpose_and_gather_feat(b_regr, b_inds) b_regr = b_regr.view(batch, 1, 1, 
K, 1, 2) r_regr = _transpose_and_gather_feat(r_regr, r_inds) r_regr = r_regr.view(batch, 1, 1, 1, K, 2) t_xs = t_xs + t_regr[..., 0] t_ys = t_ys + t_regr[..., 1] l_xs = l_xs + l_regr[..., 0] l_ys = l_ys + l_regr[..., 1] b_xs = b_xs + b_regr[..., 0] b_ys = b_ys + b_regr[..., 1] r_xs = r_xs + r_regr[..., 0] r_ys = r_ys + r_regr[..., 1] else: t_xs = t_xs + 0.5 t_ys = t_ys + 0.5 l_xs = l_xs + 0.5 l_ys = l_ys + 0.5 b_xs = b_xs + 0.5 b_ys = b_ys + 0.5 r_xs = r_xs + 0.5 r_ys = r_ys + 0.5 bboxes = torch.stack((l_xs, t_ys, r_xs, b_ys), dim=5) bboxes = bboxes.view(batch, -1, 4) bboxes = _gather_feat(bboxes, inds) clses = clses.contiguous().view(batch, -1, 1) clses = _gather_feat(clses, inds).float() t_xs = t_xs.contiguous().view(batch, -1, 1) t_xs = _gather_feat(t_xs, inds).float() t_ys = t_ys.contiguous().view(batch, -1, 1) t_ys = _gather_feat(t_ys, inds).float() l_xs = l_xs.contiguous().view(batch, -1, 1) l_xs = _gather_feat(l_xs, inds).float() l_ys = l_ys.contiguous().view(batch, -1, 1) l_ys = _gather_feat(l_ys, inds).float() b_xs = b_xs.contiguous().view(batch, -1, 1) b_xs = _gather_feat(b_xs, inds).float() b_ys = b_ys.contiguous().view(batch, -1, 1) b_ys = _gather_feat(b_ys, inds).float() r_xs = r_xs.contiguous().view(batch, -1, 1) r_xs = _gather_feat(r_xs, inds).float() r_ys = r_ys.contiguous().view(batch, -1, 1) r_ys = _gather_feat(r_ys, inds).float() detections = torch.cat([bboxes, scores, t_xs, t_ys, l_xs, l_ys, b_xs, b_ys, r_xs, r_ys, clses], dim=2) return detections def exct_decode( t_heat, l_heat, b_heat, r_heat, ct_heat, t_regr=None, l_regr=None, b_regr=None, r_regr=None, K=40, scores_thresh=0.1, center_thresh=0.1, aggr_weight=0.0, num_dets=1000 ): batch, cat, height, width = t_heat.size() ''' t_heat = torch.sigmoid(t_heat) l_heat = torch.sigmoid(l_heat) b_heat = torch.sigmoid(b_heat) r_heat = torch.sigmoid(r_heat) ct_heat = torch.sigmoid(ct_heat) ''' if aggr_weight > 0: t_heat = _h_aggregate(t_heat, aggr_weight=aggr_weight) l_heat = _v_aggregate(l_heat, aggr_weight=aggr_weight) b_heat = _h_aggregate(b_heat, aggr_weight=aggr_weight) r_heat = _v_aggregate(r_heat, aggr_weight=aggr_weight) # perform nms on heatmaps t_heat = _nms(t_heat) l_heat = _nms(l_heat) b_heat = _nms(b_heat) r_heat = _nms(r_heat) t_heat[t_heat > 1] = 1 l_heat[l_heat > 1] = 1 b_heat[b_heat > 1] = 1 r_heat[r_heat > 1] = 1 t_scores, t_inds, t_clses, t_ys, t_xs = _topk(t_heat, K=K) l_scores, l_inds, l_clses, l_ys, l_xs = _topk(l_heat, K=K) b_scores, b_inds, b_clses, b_ys, b_xs = _topk(b_heat, K=K) r_scores, r_inds, r_clses, r_ys, r_xs = _topk(r_heat, K=K) t_ys = t_ys.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K) t_xs = t_xs.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K) l_ys = l_ys.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K) l_xs = l_xs.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K) b_ys = b_ys.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K) b_xs = b_xs.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K) r_ys = r_ys.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K) r_xs = r_xs.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K) t_clses = t_clses.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K) l_clses = l_clses.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K) b_clses = b_clses.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K) r_clses = r_clses.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K) box_ct_xs = ((l_xs + r_xs + 0.5) / 2).long() box_ct_ys = ((t_ys + b_ys + 0.5) / 2).long() ct_inds = t_clses.long() * (height * width) + box_ct_ys * width + box_ct_xs ct_inds = ct_inds.view(batch, -1) ct_heat = 
ct_heat.view(batch, -1, 1) ct_scores = _gather_feat(ct_heat, ct_inds) t_scores = t_scores.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K) l_scores = l_scores.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K) b_scores = b_scores.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K) r_scores = r_scores.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K) ct_scores = ct_scores.view(batch, K, K, K, K) scores = (t_scores + l_scores + b_scores + r_scores + 2 * ct_scores) / 6 # reject boxes based on classes cls_inds = (t_clses != l_clses) + (t_clses != b_clses) + \ (t_clses != r_clses) cls_inds = (cls_inds > 0) top_inds = (t_ys > l_ys) + (t_ys > b_ys) + (t_ys > r_ys) top_inds = (top_inds > 0) left_inds = (l_xs > t_xs) + (l_xs > b_xs) + (l_xs > r_xs) left_inds = (left_inds > 0) bottom_inds = (b_ys < t_ys) + (b_ys < l_ys) + (b_ys < r_ys) bottom_inds = (bottom_inds > 0) right_inds = (r_xs < t_xs) + (r_xs < l_xs) + (r_xs < b_xs) right_inds = (right_inds > 0) sc_inds = (t_scores < scores_thresh) + (l_scores < scores_thresh) + \ (b_scores < scores_thresh) + (r_scores < scores_thresh) + \ (ct_scores < center_thresh) sc_inds = (sc_inds > 0) scores = scores - sc_inds.float() scores = scores - cls_inds.float() scores = scores - top_inds.float() scores = scores - left_inds.float() scores = scores - bottom_inds.float() scores = scores - right_inds.float() scores = scores.view(batch, -1) scores, inds = torch.topk(scores, num_dets) scores = scores.unsqueeze(2) if t_regr is not None and l_regr is not None \ and b_regr is not None and r_regr is not None: t_regr = _transpose_and_gather_feat(t_regr, t_inds) t_regr = t_regr.view(batch, K, 1, 1, 1, 2) l_regr = _transpose_and_gather_feat(l_regr, l_inds) l_regr = l_regr.view(batch, 1, K, 1, 1, 2) b_regr = _transpose_and_gather_feat(b_regr, b_inds) b_regr = b_regr.view(batch, 1, 1, K, 1, 2) r_regr = _transpose_and_gather_feat(r_regr, r_inds) r_regr = r_regr.view(batch, 1, 1, 1, K, 2) t_xs = t_xs + t_regr[..., 0] t_ys = t_ys + t_regr[..., 1] l_xs = l_xs + l_regr[..., 0] l_ys = l_ys + l_regr[..., 1] b_xs = b_xs + b_regr[..., 0] b_ys = b_ys + b_regr[..., 1] r_xs = r_xs + r_regr[..., 0] r_ys = r_ys + r_regr[..., 1] else: t_xs = t_xs + 0.5 t_ys = t_ys + 0.5 l_xs = l_xs + 0.5 l_ys = l_ys + 0.5 b_xs = b_xs + 0.5 b_ys = b_ys + 0.5 r_xs = r_xs + 0.5 r_ys = r_ys + 0.5 bboxes = torch.stack((l_xs, t_ys, r_xs, b_ys), dim=5) bboxes = bboxes.view(batch, -1, 4) bboxes = _gather_feat(bboxes, inds) clses = t_clses.contiguous().view(batch, -1, 1) clses = _gather_feat(clses, inds).float() t_xs = t_xs.contiguous().view(batch, -1, 1) t_xs = _gather_feat(t_xs, inds).float() t_ys = t_ys.contiguous().view(batch, -1, 1) t_ys = _gather_feat(t_ys, inds).float() l_xs = l_xs.contiguous().view(batch, -1, 1) l_xs = _gather_feat(l_xs, inds).float() l_ys = l_ys.contiguous().view(batch, -1, 1) l_ys = _gather_feat(l_ys, inds).float() b_xs = b_xs.contiguous().view(batch, -1, 1) b_xs = _gather_feat(b_xs, inds).float() b_ys = b_ys.contiguous().view(batch, -1, 1) b_ys = _gather_feat(b_ys, inds).float() r_xs = r_xs.contiguous().view(batch, -1, 1) r_xs = _gather_feat(r_xs, inds).float() r_ys = r_ys.contiguous().view(batch, -1, 1) r_ys = _gather_feat(r_ys, inds).float() detections = torch.cat([bboxes, scores, t_xs, t_ys, l_xs, l_ys, b_xs, b_ys, r_xs, r_ys, clses], dim=2) return detections def ddd_decode(heat, rot, depth, dim, wh=None, reg=None, K=40): batch, cat, height, width = heat.size() # heat = torch.sigmoid(heat) # perform nms on heatmaps heat = _nms(heat) scores, inds, clses, ys, xs = _topk(heat, K=K) if reg 
is not None: reg = _transpose_and_gather_feat(reg, inds) reg = reg.view(batch, K, 2) xs = xs.view(batch, K, 1) + reg[:, :, 0:1] ys = ys.view(batch, K, 1) + reg[:, :, 1:2] else: xs = xs.view(batch, K, 1) + 0.5 ys = ys.view(batch, K, 1) + 0.5 rot = _transpose_and_gather_feat(rot, inds) rot = rot.view(batch, K, 8) depth = _transpose_and_gather_feat(depth, inds) depth = depth.view(batch, K, 1) dim = _transpose_and_gather_feat(dim, inds) dim = dim.view(batch, K, 3) clses = clses.view(batch, K, 1).float() scores = scores.view(batch, K, 1) xs = xs.view(batch, K, 1) ys = ys.view(batch, K, 1) if wh is not None: wh = _transpose_and_gather_feat(wh, inds) wh = wh.view(batch, K, 2) detections = torch.cat( [xs, ys, scores, rot, depth, dim, wh, clses], dim=2) else: detections = torch.cat( [xs, ys, scores, rot, depth, dim, clses], dim=2) return detections def ctdet_decode(heat, wh, reg=None, cat_spec_wh=False, K=100): batch, cat, height, width = heat.size() # heat = torch.sigmoid(heat) # perform nms on heatmaps heat = _nms(heat) scores, inds, clses, ys, xs = _topk(heat, K=K) if reg is not None: reg = _transpose_and_gather_feat(reg, inds) reg = reg.view(batch, K, 2) xs = xs.view(batch, K, 1) + reg[:, :, 0:1] ys = ys.view(batch, K, 1) + reg[:, :, 1:2] else: xs = xs.view(batch, K, 1) + 0.5 ys = ys.view(batch, K, 1) + 0.5 wh = _transpose_and_gather_feat(wh, inds) if cat_spec_wh: wh = wh.view(batch, K, cat, 2) clses_ind = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2).long() wh = wh.gather(2, clses_ind).view(batch, K, 2) else: wh = wh.view(batch, K, 2) clses = clses.view(batch, K, 1).float() scores = scores.view(batch, K, 1) bboxes = torch.cat([xs - wh[..., 0:1] / 2, ys - wh[..., 1:2] / 2, xs + wh[..., 0:1] / 2, ys + wh[..., 1:2] / 2], dim=2) detections = torch.cat([bboxes, scores, clses], dim=2) return detections def multi_pose_decode( heat, wh, kps, reg=None, hm_hp=None, hp_offset=None, K=100): batch, cat, height, width = heat.size() num_joints = kps.shape[1] // 2 # heat = torch.sigmoid(heat) # perform nms on heatmaps heat = _nms(heat) scores, inds, clses, ys, xs = _topk(heat, K=K) kps = _transpose_and_gather_feat(kps, inds) kps = kps.view(batch, K, num_joints * 2) kps[..., ::2] += xs.view(batch, K, 1).expand(batch, K, num_joints) kps[..., 1::2] += ys.view(batch, K, 1).expand(batch, K, num_joints) if reg is not None: reg = _transpose_and_gather_feat(reg, inds) reg = reg.view(batch, K, 2) xs = xs.view(batch, K, 1) + reg[:, :, 0:1] ys = ys.view(batch, K, 1) + reg[:, :, 1:2] else: xs = xs.view(batch, K, 1) + 0.5 ys = ys.view(batch, K, 1) + 0.5 wh = _transpose_and_gather_feat(wh, inds) wh = wh.view(batch, K, 2) clses = clses.view(batch, K, 1).float() scores = scores.view(batch, K, 1) bboxes = torch.cat([xs - wh[..., 0:1] / 2, ys - wh[..., 1:2] / 2, xs + wh[..., 0:1] / 2, ys + wh[..., 1:2] / 2], dim=2) if hm_hp is not None: hm_hp = _nms(hm_hp) thresh = 0.1 kps = kps.view(batch, K, num_joints, 2).permute( 0, 2, 1, 3).contiguous() # b x J x K x 2 reg_kps = kps.unsqueeze(3).expand(batch, num_joints, K, K, 2) hm_score, hm_inds, hm_ys, hm_xs = _topk_channel(hm_hp, K=K) # b x J x K if hp_offset is not None: hp_offset = _transpose_and_gather_feat( hp_offset, hm_inds.view(batch, -1)) hp_offset = hp_offset.view(batch, num_joints, K, 2) hm_xs = hm_xs + hp_offset[:, :, :, 0] hm_ys = hm_ys + hp_offset[:, :, :, 1] else: hm_xs = hm_xs + 0.5 hm_ys = hm_ys + 0.5 mask = (hm_score > thresh).float() hm_score = (1 - mask) * -1 + mask * hm_score hm_ys = (1 - mask) * (-10000) + mask * hm_ys hm_xs = (1 - mask) * (-10000) + mask * 
hm_xs hm_kps = torch.stack([hm_xs, hm_ys], dim=-1).unsqueeze( 2).expand(batch, num_joints, K, K, 2) dist = (((reg_kps - hm_kps) ** 2).sum(dim=4) ** 0.5) min_dist, min_ind = dist.min(dim=3) # b x J x K hm_score = hm_score.gather(2, min_ind).unsqueeze(-1) # b x J x K x 1 min_dist = min_dist.unsqueeze(-1) min_ind = min_ind.view(batch, num_joints, K, 1, 1).expand( batch, num_joints, K, 1, 2) hm_kps = hm_kps.gather(3, min_ind) hm_kps = hm_kps.view(batch, num_joints, K, 2) l = bboxes[:, :, 0].view(batch, 1, K, 1).expand(batch, num_joints, K, 1) t = bboxes[:, :, 1].view(batch, 1, K, 1).expand(batch, num_joints, K, 1) r = bboxes[:, :, 2].view(batch, 1, K, 1).expand(batch, num_joints, K, 1) b = bboxes[:, :, 3].view(batch, 1, K, 1).expand(batch, num_joints, K, 1) mask = (hm_kps[..., 0:1] < l) + (hm_kps[..., 0:1] > r) + \ (hm_kps[..., 1:2] < t) + (hm_kps[..., 1:2] > b) + \ (hm_score < thresh) + (min_dist > (torch.max(b - t, r - l) * 0.3)) mask = (mask > 0).float().expand(batch, num_joints, K, 2) kps = (1 - mask) * hm_kps + mask * kps kps = kps.permute(0, 2, 1, 3).contiguous().view( batch, K, num_joints * 2) detections = torch.cat([bboxes, scores, kps, clses], dim=2) return detections
21,763
37.115587
79
py
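A minimal usage sketch for ctdet_decode above, assuming torch is installed and the repo's src/lib directory is on the import path; the shapes and the 6-column output layout follow the code, while the tensor values are dummies.

import torch
from models.decode import ctdet_decode

batch, num_classes, h, w = 1, 80, 128, 128
heat = torch.rand(batch, num_classes, h, w)  # the caller is expected to sigmoid the heatmap beforehand
wh   = torch.rand(batch, 2, h, w)            # width/height head
reg  = torch.rand(batch, 2, h, w)            # sub-pixel centre offset head

dets = ctdet_decode(heat, wh, reg=reg, K=100)
print(dets.shape)  # (1, 100, 6): [x1, y1, x2, y2, score, class] in output-feature coordinates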
SyNet
SyNet-master/CenterNet/src/lib/models/losses.py
# Portions of this code are from from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch import torch.nn as nn from .utils import _transpose_and_gather_feat import torch.nn.functional as F def _slow_neg_loss(pred, gt): '''focal loss from CornerNet''' pos_inds = gt.eq(1) neg_inds = gt.lt(1) neg_weights = torch.pow(1 - gt[neg_inds], 4) loss = 0 pos_pred = pred[pos_inds] neg_pred = pred[neg_inds] pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2) neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2) * neg_weights num_pos = pos_inds.float().sum() pos_loss = pos_loss.sum() neg_loss = neg_loss.sum() if pos_pred.nelement() == 0: loss = loss - neg_loss else: loss = loss - (pos_loss + neg_loss) / num_pos return loss def _neg_loss(pred, gt): ''' Modified focal loss. Exactly the same as CornerNet. Runs faster and costs a little bit more memory Arguments: pred (batch x c x h x w) gt_regr (batch x c x h x w) ''' pos_inds = gt.eq(1).float() neg_inds = gt.lt(1).float() neg_weights = torch.pow(1 - gt, 4) loss = 0 pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds num_pos = pos_inds.float().sum() pos_loss = pos_loss.sum() neg_loss = neg_loss.sum() if num_pos == 0: loss = loss - neg_loss else: loss = loss - (pos_loss + neg_loss) / num_pos return loss def _not_faster_neg_loss(pred, gt): pos_inds = gt.eq(1).float() neg_inds = gt.lt(1).float() num_pos = pos_inds.float().sum() neg_weights = torch.pow(1 - gt, 4) loss = 0 trans_pred = pred * neg_inds + (1 - pred) * pos_inds weight = neg_weights * neg_inds + pos_inds all_loss = torch.log(1 - trans_pred) * torch.pow(trans_pred, 2) * weight all_loss = all_loss.sum() if num_pos > 0: all_loss /= num_pos loss -= all_loss return loss def _slow_reg_loss(regr, gt_regr, mask): num = mask.float().sum() mask = mask.unsqueeze(2).expand_as(gt_regr) regr = regr[mask] gt_regr = gt_regr[mask] regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False) regr_loss = regr_loss / (num + 1e-4) return regr_loss def _reg_loss(regr, gt_regr, mask): ''' L1 regression loss Arguments: regr (batch x max_objects x dim) gt_regr (batch x max_objects x dim) mask (batch x max_objects) ''' num = mask.float().sum() mask = mask.unsqueeze(2).expand_as(gt_regr).float() regr = regr * mask gt_regr = gt_regr * mask regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False) regr_loss = regr_loss / (num + 1e-4) return regr_loss class FocalLoss(nn.Module): '''nn.Module warpper for focal loss''' def __init__(self): super(FocalLoss, self).__init__() self.neg_loss = _neg_loss def forward(self, out, target): return self.neg_loss(out, target) class RegLoss(nn.Module): '''Regression loss for an output tensor Arguments: output (batch x dim x h x w) mask (batch x max_objects) ind (batch x max_objects) target (batch x max_objects x dim) ''' def __init__(self): super(RegLoss, self).__init__() def forward(self, output, mask, ind, target): pred = _transpose_and_gather_feat(output, ind) loss = _reg_loss(pred, target, mask) return loss class RegL1Loss(nn.Module): def __init__(self): super(RegL1Loss, self).__init__() def forward(self, output, mask, ind, target): pred = _transpose_and_gather_feat(output, ind) mask = mask.unsqueeze(2).expand_as(pred).float() # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean') loss = F.l1_loss(pred * mask, target * mask, size_average=False) loss = loss / (mask.sum() + 
1e-4) return loss class NormRegL1Loss(nn.Module): def __init__(self): super(NormRegL1Loss, self).__init__() def forward(self, output, mask, ind, target): pred = _transpose_and_gather_feat(output, ind) mask = mask.unsqueeze(2).expand_as(pred).float() # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean') pred = pred / (target + 1e-4) target = target * 0 + 1 loss = F.l1_loss(pred * mask, target * mask, size_average=False) loss = loss / (mask.sum() + 1e-4) return loss class RegWeightedL1Loss(nn.Module): def __init__(self): super(RegWeightedL1Loss, self).__init__() def forward(self, output, mask, ind, target): pred = _transpose_and_gather_feat(output, ind) mask = mask.float() # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean') loss = F.l1_loss(pred * mask, target * mask, size_average=False) loss = loss / (mask.sum() + 1e-4) return loss class L1Loss(nn.Module): def __init__(self): super(L1Loss, self).__init__() def forward(self, output, mask, ind, target): pred = _transpose_and_gather_feat(output, ind) mask = mask.unsqueeze(2).expand_as(pred).float() loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean') return loss class BinRotLoss(nn.Module): def __init__(self): super(BinRotLoss, self).__init__() def forward(self, output, mask, ind, rotbin, rotres): pred = _transpose_and_gather_feat(output, ind) loss = compute_rot_loss(pred, rotbin, rotres, mask) return loss def compute_res_loss(output, target): return F.smooth_l1_loss(output, target, reduction='elementwise_mean') # TODO: weight def compute_bin_loss(output, target, mask): mask = mask.expand_as(output) output = output * mask.float() return F.cross_entropy(output, target, reduction='elementwise_mean') def compute_rot_loss(output, target_bin, target_res, mask): # output: (B, 128, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos, # bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos] # target_bin: (B, 128, 2) [bin1_cls, bin2_cls] # target_res: (B, 128, 2) [bin1_res, bin2_res] # mask: (B, 128, 1) # import pdb; pdb.set_trace() output = output.view(-1, 8) target_bin = target_bin.view(-1, 2) target_res = target_res.view(-1, 2) mask = mask.view(-1, 1) loss_bin1 = compute_bin_loss(output[:, 0:2], target_bin[:, 0], mask) loss_bin2 = compute_bin_loss(output[:, 4:6], target_bin[:, 1], mask) loss_res = torch.zeros_like(loss_bin1) if target_bin[:, 0].nonzero().shape[0] > 0: idx1 = target_bin[:, 0].nonzero()[:, 0] valid_output1 = torch.index_select(output, 0, idx1.long()) valid_target_res1 = torch.index_select(target_res, 0, idx1.long()) loss_sin1 = compute_res_loss( valid_output1[:, 2], torch.sin(valid_target_res1[:, 0])) loss_cos1 = compute_res_loss( valid_output1[:, 3], torch.cos(valid_target_res1[:, 0])) loss_res += loss_sin1 + loss_cos1 if target_bin[:, 1].nonzero().shape[0] > 0: idx2 = target_bin[:, 1].nonzero()[:, 0] valid_output2 = torch.index_select(output, 0, idx2.long()) valid_target_res2 = torch.index_select(target_res, 0, idx2.long()) loss_sin2 = compute_res_loss( valid_output2[:, 6], torch.sin(valid_target_res2[:, 1])) loss_cos2 = compute_res_loss( valid_output2[:, 7], torch.cos(valid_target_res2[:, 1])) loss_res += loss_sin2 + loss_cos2 return loss_bin1 + loss_bin2 + loss_res
7,843
31.957983
80
py
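A minimal sketch exercising FocalLoss and RegL1Loss from losses.py on dummy tensors, assuming torch and the repo's src/lib import path; in the real trainer the prediction passes through the clamped sigmoid in models/utils.py and the ground-truth heatmap contains Gaussian-splatted peaks, so the values here are placeholders.

import torch
from models.losses import FocalLoss, RegL1Loss

batch, c, h, w, max_objs = 2, 80, 128, 128, 128
pred_hm = torch.rand(batch, c, h, w).clamp(1e-4, 1 - 1e-4)  # stand-in for a sigmoided heatmap
gt_hm = torch.zeros(batch, c, h, w)
gt_hm[:, 0, 64, 64] = 1.0                                   # one positive peak per image
hm_loss = FocalLoss()(pred_hm, gt_hm)

wh_out = torch.rand(batch, 2, h, w)           # raw wh head output
ind    = torch.zeros(batch, max_objs).long()  # flattened index y * w + x of each object centre
mask   = torch.zeros(batch, max_objs)
ind[:, 0], mask[:, 0] = 64 * w + 64, 1        # only the first slot holds a real object
target = torch.zeros(batch, max_objs, 2)
target[:, 0] = torch.tensor([10.0, 20.0])
wh_loss = RegL1Loss()(wh_out, mask, ind, target)
print(hm_loss.item(), wh_loss.item())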
SyNet
SyNet-master/CenterNet/src/lib/models/data_parallel.py
import torch from torch.nn.modules import Module from torch.nn.parallel.scatter_gather import gather from torch.nn.parallel.replicate import replicate from torch.nn.parallel.parallel_apply import parallel_apply from .scatter_gather import scatter_kwargs class _DataParallel(Module): r"""Implements data parallelism at the module level. This container parallelizes the application of the given module by splitting the input across the specified devices by chunking in the batch dimension. In the forward pass, the module is replicated on each device, and each replica handles a portion of the input. During the backwards pass, gradients from each replica are summed into the original module. The batch size should be larger than the number of GPUs used. It should also be an integer multiple of the number of GPUs so that each chunk is the same size (so that each GPU processes the same number of samples). See also: :ref:`cuda-nn-dataparallel-instead` Arbitrary positional and keyword inputs are allowed to be passed into DataParallel EXCEPT Tensors. All variables will be scattered on dim specified (default 0). Primitive types will be broadcasted, but all other types will be a shallow copy and can be corrupted if written to in the model's forward pass. Args: module: module to be parallelized device_ids: CUDA devices (default: all devices) output_device: device location of output (default: device_ids[0]) Example:: >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2]) >>> output = net(input_var) """ # TODO: update notes/cuda.rst when this class handles 8+ GPUs well def __init__(self, module, device_ids=None, output_device=None, dim=0, chunk_sizes=None): super(_DataParallel, self).__init__() if not torch.cuda.is_available(): self.module = module self.device_ids = [] return if device_ids is None: device_ids = list(range(torch.cuda.device_count())) if output_device is None: output_device = device_ids[0] self.dim = dim self.module = module self.device_ids = device_ids self.chunk_sizes = chunk_sizes self.output_device = output_device if len(self.device_ids) == 1: self.module.cuda(device_ids[0]) def forward(self, *inputs, **kwargs): if not self.device_ids: return self.module(*inputs, **kwargs) inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes) if len(self.device_ids) == 1: return self.module(*inputs[0], **kwargs[0]) replicas = self.replicate(self.module, self.device_ids[:len(inputs)]) outputs = self.parallel_apply(replicas, inputs, kwargs) return self.gather(outputs, self.output_device) def replicate(self, module, device_ids): return replicate(module, device_ids) def scatter(self, inputs, kwargs, device_ids, chunk_sizes): return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim, chunk_sizes=self.chunk_sizes) def parallel_apply(self, replicas, inputs, kwargs): return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)]) def gather(self, outputs, output_device): return gather(outputs, output_device, dim=self.dim) def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None): r"""Evaluates module(input) in parallel across the GPUs given in device_ids. This is the functional version of the DataParallel module. Args: module: the module to evaluate in parallel inputs: inputs to the module device_ids: GPU ids on which to replicate module output_device: GPU location of the output Use -1 to indicate the CPU. 
(default: device_ids[0]) Returns: a Variable containing the result of module(input) located on output_device """ if not isinstance(inputs, tuple): inputs = (inputs,) if device_ids is None: device_ids = list(range(torch.cuda.device_count())) if output_device is None: output_device = device_ids[0] inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim) if len(device_ids) == 1: return module(*inputs[0], **module_kwargs[0]) used_device_ids = device_ids[:len(inputs)] replicas = replicate(module, used_device_ids) outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids) return gather(outputs, output_device, dim) def DataParallel(module, device_ids=None, output_device=None, dim=0, chunk_sizes=None): if chunk_sizes is None: return torch.nn.DataParallel(module, device_ids, output_device, dim) standard_size = True for i in range(1, len(chunk_sizes)): if chunk_sizes[i] != chunk_sizes[0]: standard_size = False if standard_size: return torch.nn.DataParallel(module, device_ids, output_device, dim) return _DataParallel(module, device_ids, output_device, dim, chunk_sizes)
5,176
39.445313
101
py
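A minimal sketch of the chunk_sizes option that distinguishes this wrapper from torch.nn.DataParallel, assuming at least two visible CUDA devices; the toy model and the 4/6 split are placeholders.

import torch
import torch.nn as nn
from models.data_parallel import DataParallel

model = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU()).cuda()
# uneven split: GPU 0 receives 4 samples, GPU 1 receives 6 (sizes must sum to the batch size)
model = DataParallel(model, device_ids=[0, 1], chunk_sizes=[4, 6])

x = torch.randn(10, 3, 64, 64).cuda()
y = model(x)       # outputs are gathered back onto device_ids[0]
print(y.shape)     # torch.Size([10, 16, 64, 64])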
SyNet
SyNet-master/CenterNet/src/lib/models/utils.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch import torch.nn as nn def _sigmoid(x): y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4) return y def _gather_feat(feat, ind, mask=None): dim = feat.size(2) ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim) feat = feat.gather(1, ind) if mask is not None: mask = mask.unsqueeze(2).expand_as(feat) feat = feat[mask] feat = feat.view(-1, dim) return feat def _transpose_and_gather_feat(feat, ind): feat = feat.permute(0, 2, 3, 1).contiguous() feat = feat.view(feat.size(0), -1, feat.size(3)) feat = _gather_feat(feat, ind) return feat def flip_tensor(x): return torch.flip(x, [3]) # tmp = x.detach().cpu().numpy()[..., ::-1].copy() # return torch.from_numpy(tmp).to(x.device) def flip_lr(x, flip_idx): tmp = x.detach().cpu().numpy()[..., ::-1].copy() shape = tmp.shape for e in flip_idx: tmp[:, e[0], ...], tmp[:, e[1], ...] = \ tmp[:, e[1], ...].copy(), tmp[:, e[0], ...].copy() return torch.from_numpy(tmp.reshape(shape)).to(x.device) def flip_lr_off(x, flip_idx): tmp = x.detach().cpu().numpy()[..., ::-1].copy() shape = tmp.shape tmp = tmp.reshape(tmp.shape[0], 17, 2, tmp.shape[2], tmp.shape[3]) tmp[:, :, 0, :, :] *= -1 for e in flip_idx: tmp[:, e[0], ...], tmp[:, e[1], ...] = \ tmp[:, e[1], ...].copy(), tmp[:, e[0], ...].copy() return torch.from_numpy(tmp.reshape(shape)).to(x.device)
1,571
30.44
65
py
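A minimal sketch of _transpose_and_gather_feat above, assuming torch: it selects, for every object index in ind, the C-dimensional feature vector at that flattened spatial position of a (B, C, H, W) head output.

import torch
from models.utils import _transpose_and_gather_feat

b, c, h, w = 2, 2, 4, 4
feat = torch.arange(b * c * h * w, dtype=torch.float32).view(b, c, h, w)
ind = torch.tensor([[5, 10], [0, 15]])          # flattened indices y * w + x, one row per batch item
out = _transpose_and_gather_feat(feat, ind)
print(out.shape)                                # torch.Size([2, 2, 2]) = (B, num_objects, C)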
SyNet
SyNet-master/CenterNet/src/lib/models/model.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import torchvision.models as models import torch import torch.nn as nn import os from .networks.msra_resnet import get_pose_net from .networks.dlav0 import get_pose_net as get_dlav0 from .networks.pose_dla_dcn import get_pose_net as get_dla_dcn from .networks.resnet_dcn import get_pose_net as get_pose_net_dcn from .networks.large_hourglass import get_large_hourglass_net _model_factory = { 'res': get_pose_net, # default Resnet with deconv 'dlav0': get_dlav0, # default DLAup 'dla': get_dla_dcn, 'resdcn': get_pose_net_dcn, 'hourglass': get_large_hourglass_net, } def create_model(arch, heads, head_conv): num_layers = int(arch[arch.find('_') + 1:]) if '_' in arch else 0 arch = arch[:arch.find('_')] if '_' in arch else arch get_model = _model_factory[arch] model = get_model(num_layers=num_layers, heads=heads, head_conv=head_conv) return model def load_model(model, model_path, optimizer=None, resume=False, lr=None, lr_step=None): start_epoch = 0 checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage) print('loaded {}, epoch {}'.format(model_path, checkpoint['epoch'])) state_dict_ = checkpoint['state_dict'] state_dict = {} # convert data_parallal to model for k in state_dict_: if k.startswith('module') and not k.startswith('module_list'): state_dict[k[7:]] = state_dict_[k] else: state_dict[k] = state_dict_[k] model_state_dict = model.state_dict() # check loaded parameters and created model parameters msg = 'If you see this, your model does not fully load the ' + \ 'pre-trained weight. Please make sure ' + \ 'you have correctly specified --arch xxx ' + \ 'or set the correct --num_classes for your own dataset.' for k in state_dict: if k in model_state_dict: if state_dict[k].shape != model_state_dict[k].shape: print('Skip loading parameter {}, required shape{}, '\ 'loaded shape{}. {}'.format( k, model_state_dict[k].shape, state_dict[k].shape, msg)) state_dict[k] = model_state_dict[k] else: print('Drop parameter {}.'.format(k) + msg) for k in model_state_dict: if not (k in state_dict): print('No param {}.'.format(k) + msg) state_dict[k] = model_state_dict[k] model.load_state_dict(state_dict, strict=False) # resume optimizer parameters if optimizer is not None and resume: if 'optimizer' in checkpoint: optimizer.load_state_dict(checkpoint['optimizer']) start_epoch = checkpoint['epoch'] start_lr = lr for step in lr_step: if start_epoch >= step: start_lr *= 0.1 for param_group in optimizer.param_groups: param_group['lr'] = start_lr print('Resumed optimizer with start lr', start_lr) else: print('No optimizer parameters in checkpoint.') if optimizer is not None: return model, optimizer, start_epoch else: return model def save_model(path, epoch, model, optimizer=None): if isinstance(model, torch.nn.DataParallel): state_dict = model.module.state_dict() else: state_dict = model.state_dict() data = {'epoch': epoch, 'state_dict': state_dict} if not (optimizer is None): data['optimizer'] = optimizer.state_dict() torch.save(data, path)
3,415
34.216495
80
py
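A minimal sketch of the create_model / save_model / load_model round trip, assuming torch, network access for the ImageNet-pretrained backbone weights, and the repo's src/lib import path; the checkpoint name, learning rate and lr_step values are placeholders.

import torch
from models.model import create_model, load_model, save_model

heads = {'hm': 80, 'wh': 2, 'reg': 2}                 # CenterNet detection heads for 80 classes
model = create_model('res_18', heads, head_conv=64)   # arch string = backbone name + '_' + depth

optimizer = torch.optim.Adam(model.parameters(), lr=1.25e-4)
save_model('model_last.pth', epoch=0, model=model, optimizer=optimizer)
model, optimizer, start_epoch = load_model(
    model, 'model_last.pth', optimizer, resume=True, lr=1.25e-4, lr_step=[90, 120])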
SyNet
SyNet-master/CenterNet/src/lib/models/scatter_gather.py
import torch from torch.autograd import Variable from torch.nn.parallel._functions import Scatter, Gather def scatter(inputs, target_gpus, dim=0, chunk_sizes=None): r""" Slices variables into approximately equal chunks and distributes them across given GPUs. Duplicates references to objects that are not variables. Does not support Tensors. """ def scatter_map(obj): if isinstance(obj, Variable): return Scatter.apply(target_gpus, chunk_sizes, dim, obj) assert not torch.is_tensor(obj), "Tensors not supported in scatter." if isinstance(obj, tuple): return list(zip(*map(scatter_map, obj))) if isinstance(obj, list): return list(map(list, zip(*map(scatter_map, obj)))) if isinstance(obj, dict): return list(map(type(obj), zip(*map(scatter_map, obj.items())))) return [obj for targets in target_gpus] return scatter_map(inputs) def scatter_kwargs(inputs, kwargs, target_gpus, dim=0, chunk_sizes=None): r"""Scatter with support for kwargs dictionary""" inputs = scatter(inputs, target_gpus, dim, chunk_sizes) if inputs else [] kwargs = scatter(kwargs, target_gpus, dim, chunk_sizes) if kwargs else [] if len(inputs) < len(kwargs): inputs.extend([() for _ in range(len(kwargs) - len(inputs))]) elif len(kwargs) < len(inputs): kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))]) inputs = tuple(inputs) kwargs = tuple(kwargs) return inputs, kwargs
1,535
38.384615
77
py
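A minimal sketch of scatter_kwargs, assuming torch and two visible CUDA devices: a dict-valued batch is split along dim 0 using the optional chunk_sizes, dictionary keys are replicated and tensors are scattered; the field names and sizes are placeholders.

import torch
from models.scatter_gather import scatter_kwargs

batch = {'input': torch.randn(6, 3, 32, 32), 'hm': torch.randn(6, 80, 8, 8)}
inputs, kwargs = scatter_kwargs((batch,), {}, target_gpus=[0, 1], dim=0, chunk_sizes=[2, 4])
print([b['input'].shape for (b,) in inputs])   # [torch.Size([2, 3, 32, 32]), torch.Size([4, 3, 32, 32])]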
SyNet
SyNet-master/CenterNet/src/lib/models/networks/resnet_dcn.py
# Modified by Dequan Wang and Xingyi Zhou from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import math import logging import torch import torch.nn as nn from .DCNv2.dcn_v2 import DCN import torch.utils.model_zoo as model_zoo BN_MOMENTUM = 0.1 logger = logging.getLogger(__name__) model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * self.expansion, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out def fill_up_weights(up): w = up.weight.data f = math.ceil(w.size(2) / 2) c = (2 * f - 1 - f % 2) / (2. 
* f) for i in range(w.size(2)): for j in range(w.size(3)): w[0, 0, i, j] = \ (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c)) for c in range(1, w.size(0)): w[c, 0, :, :] = w[0, 0, :, :] def fill_fc_weights(layers): for m in layers.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight, std=0.001) # torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu') # torch.nn.init.xavier_normal_(m.weight.data) if m.bias is not None: nn.init.constant_(m.bias, 0) class PoseResNet(nn.Module): def __init__(self, block, layers, heads, head_conv): self.inplanes = 64 self.heads = heads self.deconv_with_bias = False super(PoseResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) # used for deconv layers self.deconv_layers = self._make_deconv_layer( 3, [256, 128, 64], [4, 4, 4], ) for head in self.heads: classes = self.heads[head] if head_conv > 0: fc = nn.Sequential( nn.Conv2d(64, head_conv, kernel_size=3, padding=1, bias=True), nn.ReLU(inplace=True), nn.Conv2d(head_conv, classes, kernel_size=1, stride=1, padding=0, bias=True)) if 'hm' in head: fc[-1].bias.data.fill_(-2.19) else: fill_fc_weights(fc) else: fc = nn.Conv2d(64, classes, kernel_size=1, stride=1, padding=0, bias=True) if 'hm' in head: fc.bias.data.fill_(-2.19) else: fill_fc_weights(fc) self.__setattr__(head, fc) def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def _get_deconv_cfg(self, deconv_kernel, index): if deconv_kernel == 4: padding = 1 output_padding = 0 elif deconv_kernel == 3: padding = 1 output_padding = 1 elif deconv_kernel == 2: padding = 0 output_padding = 0 return deconv_kernel, padding, output_padding def _make_deconv_layer(self, num_layers, num_filters, num_kernels): assert num_layers == len(num_filters), \ 'ERROR: num_deconv_layers is different len(num_deconv_filters)' assert num_layers == len(num_kernels), \ 'ERROR: num_deconv_layers is different len(num_deconv_filters)' layers = [] for i in range(num_layers): kernel, padding, output_padding = \ self._get_deconv_cfg(num_kernels[i], i) planes = num_filters[i] fc = DCN(self.inplanes, planes, kernel_size=(3,3), stride=1, padding=1, dilation=1, deformable_groups=1) # fc = nn.Conv2d(self.inplanes, planes, # kernel_size=3, stride=1, # padding=1, dilation=1, bias=False) # fill_fc_weights(fc) up = nn.ConvTranspose2d( in_channels=planes, out_channels=planes, kernel_size=kernel, stride=2, padding=padding, output_padding=output_padding, bias=self.deconv_with_bias) fill_up_weights(up) layers.append(fc) layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)) layers.append(nn.ReLU(inplace=True)) layers.append(up) 
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)) layers.append(nn.ReLU(inplace=True)) self.inplanes = planes return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.deconv_layers(x) ret = {} for head in self.heads: ret[head] = self.__getattr__(head)(x) return [ret] def init_weights(self, num_layers): if 1: url = model_urls['resnet{}'.format(num_layers)] pretrained_state_dict = model_zoo.load_url(url) print('=> loading pretrained model {}'.format(url)) self.load_state_dict(pretrained_state_dict, strict=False) print('=> init deconv weights from normal distribution') for name, m in self.deconv_layers.named_modules(): if isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]), 34: (BasicBlock, [3, 4, 6, 3]), 50: (Bottleneck, [3, 4, 6, 3]), 101: (Bottleneck, [3, 4, 23, 3]), 152: (Bottleneck, [3, 8, 36, 3])} def get_pose_net(num_layers, heads, head_conv=256): block_class, layers = resnet_spec[num_layers] model = PoseResNet(block_class, layers, heads, head_conv=head_conv) model.init_weights(num_layers) return model
10,054
33.553265
80
py
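A minimal sketch of get_pose_net from resnet_dcn.py, assuming torch, a CUDA device, a compiled DCNv2 extension (imported at the top of the file) and network access for the pretrained ResNet weights; it shows the one-element list of head dicts returned by the forward pass at stride 4.

import torch
from models.networks.resnet_dcn import get_pose_net

heads = {'hm': 80, 'wh': 2, 'reg': 2}
net = get_pose_net(num_layers=18, heads=heads, head_conv=64).cuda().eval()

with torch.no_grad():
    out = net(torch.randn(1, 3, 512, 512).cuda())[0]   # forward returns [dict_of_heads]
print({k: tuple(v.shape) for k, v in out.items()})
# hm: (1, 80, 128, 128), wh: (1, 2, 128, 128), reg: (1, 2, 128, 128)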
SyNet
SyNet-master/CenterNet/src/lib/models/networks/pose_dla_dcn.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import math import logging import numpy as np from os.path import join import torch from torch import nn import torch.nn.functional as F import torch.utils.model_zoo as model_zoo from .DCNv2.dcn_v2 import DCN BN_MOMENTUM = 0.1 logger = logging.getLogger(__name__) def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'): return join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash)) def conv3x3(in_planes, out_planes, stride=1): "3x3 convolution with padding" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): def __init__(self, inplanes, planes, stride=1, dilation=1): super(BasicBlock, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=dilation, bias=False, dilation=dilation) self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) self.stride = stride def forward(self, x, residual=None): if residual is None: residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 2 def __init__(self, inplanes, planes, stride=1, dilation=1): super(Bottleneck, self).__init__() expansion = Bottleneck.expansion bottle_planes = planes // expansion self.conv1 = nn.Conv2d(inplanes, bottle_planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM) self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM) self.conv3 = nn.Conv2d(bottle_planes, planes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.stride = stride def forward(self, x, residual=None): if residual is None: residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) out += residual out = self.relu(out) return out class BottleneckX(nn.Module): expansion = 2 cardinality = 32 def __init__(self, inplanes, planes, stride=1, dilation=1): super(BottleneckX, self).__init__() cardinality = BottleneckX.cardinality # dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0))) # bottle_planes = dim * cardinality bottle_planes = planes * cardinality // 32 self.conv1 = nn.Conv2d(inplanes, bottle_planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM) self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation, groups=cardinality) self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM) self.conv3 = nn.Conv2d(bottle_planes, planes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.stride = stride def forward(self, x, residual=None): if residual is None: residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = 
self.bn3(out) out += residual out = self.relu(out) return out class Root(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, residual): super(Root, self).__init__() self.conv = nn.Conv2d( in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2) self.bn = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.residual = residual def forward(self, *x): children = x x = self.conv(torch.cat(x, 1)) x = self.bn(x) if self.residual: x += children[0] x = self.relu(x) return x class Tree(nn.Module): def __init__(self, levels, block, in_channels, out_channels, stride=1, level_root=False, root_dim=0, root_kernel_size=1, dilation=1, root_residual=False): super(Tree, self).__init__() if root_dim == 0: root_dim = 2 * out_channels if level_root: root_dim += in_channels if levels == 1: self.tree1 = block(in_channels, out_channels, stride, dilation=dilation) self.tree2 = block(out_channels, out_channels, 1, dilation=dilation) else: self.tree1 = Tree(levels - 1, block, in_channels, out_channels, stride, root_dim=0, root_kernel_size=root_kernel_size, dilation=dilation, root_residual=root_residual) self.tree2 = Tree(levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, root_kernel_size=root_kernel_size, dilation=dilation, root_residual=root_residual) if levels == 1: self.root = Root(root_dim, out_channels, root_kernel_size, root_residual) self.level_root = level_root self.root_dim = root_dim self.downsample = None self.project = None self.levels = levels if stride > 1: self.downsample = nn.MaxPool2d(stride, stride=stride) if in_channels != out_channels: self.project = nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM) ) def forward(self, x, residual=None, children=None): children = [] if children is None else children bottom = self.downsample(x) if self.downsample else x residual = self.project(bottom) if self.project else bottom if self.level_root: children.append(bottom) x1 = self.tree1(x, residual) if self.levels == 1: x2 = self.tree2(x1) x = self.root(x2, x1, *children) else: children.append(x1) x = self.tree2(x1, children=children) return x class DLA(nn.Module): def __init__(self, levels, channels, num_classes=1000, block=BasicBlock, residual_root=False, linear_root=False): super(DLA, self).__init__() self.channels = channels self.num_classes = num_classes self.base_layer = nn.Sequential( nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3, bias=False), nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM), nn.ReLU(inplace=True)) self.level0 = self._make_conv_level( channels[0], channels[0], levels[0]) self.level1 = self._make_conv_level( channels[0], channels[1], levels[1], stride=2) self.level2 = Tree(levels[2], block, channels[1], channels[2], 2, level_root=False, root_residual=residual_root) self.level3 = Tree(levels[3], block, channels[2], channels[3], 2, level_root=True, root_residual=residual_root) self.level4 = Tree(levels[4], block, channels[3], channels[4], 2, level_root=True, root_residual=residual_root) self.level5 = Tree(levels[5], block, channels[4], channels[5], 2, level_root=True, root_residual=residual_root) # for m in self.modules(): # if isinstance(m, nn.Conv2d): # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels # m.weight.data.normal_(0, math.sqrt(2. 
/ n)) # elif isinstance(m, nn.BatchNorm2d): # m.weight.data.fill_(1) # m.bias.data.zero_() def _make_level(self, block, inplanes, planes, blocks, stride=1): downsample = None if stride != 1 or inplanes != planes: downsample = nn.Sequential( nn.MaxPool2d(stride, stride=stride), nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(planes, momentum=BN_MOMENTUM), ) layers = [] layers.append(block(inplanes, planes, stride, downsample=downsample)) for i in range(1, blocks): layers.append(block(inplanes, planes)) return nn.Sequential(*layers) def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): modules = [] for i in range(convs): modules.extend([ nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1, padding=dilation, bias=False, dilation=dilation), nn.BatchNorm2d(planes, momentum=BN_MOMENTUM), nn.ReLU(inplace=True)]) inplanes = planes return nn.Sequential(*modules) def forward(self, x): y = [] x = self.base_layer(x) for i in range(6): x = getattr(self, 'level{}'.format(i))(x) y.append(x) return y def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'): # fc = self.fc if name.endswith('.pth'): model_weights = torch.load(data + name) else: model_url = get_model_url(data, name, hash) model_weights = model_zoo.load_url(model_url) num_classes = len(model_weights[list(model_weights.keys())[-1]]) self.fc = nn.Conv2d( self.channels[-1], num_classes, kernel_size=1, stride=1, padding=0, bias=True) self.load_state_dict(model_weights) # self.fc = fc def dla34(pretrained=True, **kwargs): # DLA-34 model = DLA([1, 1, 1, 2, 2, 1], [16, 32, 64, 128, 256, 512], block=BasicBlock, **kwargs) if pretrained: model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86') return model class Identity(nn.Module): def __init__(self): super(Identity, self).__init__() def forward(self, x): return x def fill_fc_weights(layers): for m in layers.modules(): if isinstance(m, nn.Conv2d): if m.bias is not None: nn.init.constant_(m.bias, 0) def fill_up_weights(up): w = up.weight.data f = math.ceil(w.size(2) / 2) c = (2 * f - 1 - f % 2) / (2. 
* f) for i in range(w.size(2)): for j in range(w.size(3)): w[0, 0, i, j] = \ (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c)) for c in range(1, w.size(0)): w[c, 0, :, :] = w[0, 0, :, :] class DeformConv(nn.Module): def __init__(self, chi, cho): super(DeformConv, self).__init__() self.actf = nn.Sequential( nn.BatchNorm2d(cho, momentum=BN_MOMENTUM), nn.ReLU(inplace=True) ) self.conv = DCN(chi, cho, kernel_size=(3,3), stride=1, padding=1, dilation=1, deformable_groups=1) def forward(self, x): x = self.conv(x) x = self.actf(x) return x class IDAUp(nn.Module): def __init__(self, o, channels, up_f): super(IDAUp, self).__init__() for i in range(1, len(channels)): c = channels[i] f = int(up_f[i]) proj = DeformConv(c, o) node = DeformConv(o, o) up = nn.ConvTranspose2d(o, o, f * 2, stride=f, padding=f // 2, output_padding=0, groups=o, bias=False) fill_up_weights(up) setattr(self, 'proj_' + str(i), proj) setattr(self, 'up_' + str(i), up) setattr(self, 'node_' + str(i), node) def forward(self, layers, startp, endp): for i in range(startp + 1, endp): upsample = getattr(self, 'up_' + str(i - startp)) project = getattr(self, 'proj_' + str(i - startp)) layers[i] = upsample(project(layers[i])) node = getattr(self, 'node_' + str(i - startp)) layers[i] = node(layers[i] + layers[i - 1]) class DLAUp(nn.Module): def __init__(self, startp, channels, scales, in_channels=None): super(DLAUp, self).__init__() self.startp = startp if in_channels is None: in_channels = channels self.channels = channels channels = list(channels) scales = np.array(scales, dtype=int) for i in range(len(channels) - 1): j = -i - 2 setattr(self, 'ida_{}'.format(i), IDAUp(channels[j], in_channels[j:], scales[j:] // scales[j])) scales[j + 1:] = scales[j] in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]] def forward(self, layers): out = [layers[-1]] # start with 32 for i in range(len(layers) - self.startp - 1): ida = getattr(self, 'ida_{}'.format(i)) ida(layers, len(layers) -i - 2, len(layers)) out.insert(0, layers[-1]) return out class Interpolate(nn.Module): def __init__(self, scale, mode): super(Interpolate, self).__init__() self.scale = scale self.mode = mode def forward(self, x): x = F.interpolate(x, scale_factor=self.scale, mode=self.mode, align_corners=False) return x class DLASeg(nn.Module): def __init__(self, base_name, heads, pretrained, down_ratio, final_kernel, last_level, head_conv, out_channel=0): super(DLASeg, self).__init__() assert down_ratio in [2, 4, 8, 16] self.first_level = int(np.log2(down_ratio)) self.last_level = last_level self.base = globals()[base_name](pretrained=pretrained) channels = self.base.channels scales = [2 ** i for i in range(len(channels[self.first_level:]))] self.dla_up = DLAUp(self.first_level, channels[self.first_level:], scales) if out_channel == 0: out_channel = channels[self.first_level] self.ida_up = IDAUp(out_channel, channels[self.first_level:self.last_level], [2 ** i for i in range(self.last_level - self.first_level)]) self.heads = heads for head in self.heads: classes = self.heads[head] if head_conv > 0: fc = nn.Sequential( nn.Conv2d(channels[self.first_level], head_conv, kernel_size=3, padding=1, bias=True), nn.ReLU(inplace=True), nn.Conv2d(head_conv, classes, kernel_size=final_kernel, stride=1, padding=final_kernel // 2, bias=True)) if 'hm' in head: fc[-1].bias.data.fill_(-2.19) else: fill_fc_weights(fc) else: fc = nn.Conv2d(channels[self.first_level], classes, kernel_size=final_kernel, stride=1, padding=final_kernel // 2, bias=True) if 'hm' in head: 
fc.bias.data.fill_(-2.19) else: fill_fc_weights(fc) self.__setattr__(head, fc) def forward(self, x): x = self.base(x) x = self.dla_up(x) y = [] for i in range(self.last_level - self.first_level): y.append(x[i].clone()) self.ida_up(y, 0, len(y)) z = {} for head in self.heads: z[head] = self.__getattr__(head)(y[-1]) return [z] def get_pose_net(num_layers, heads, head_conv=256, down_ratio=4): model = DLASeg('dla{}'.format(num_layers), heads, pretrained=True, down_ratio=down_ratio, final_kernel=1, last_level=5, head_conv=head_conv) return model
17,594
34.617409
106
py
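The file above defines the DLA-34 backbone with deformable-convolution upsampling used for the detection heads. A minimal, illustrative construction sketch follows; the import path and head sizes are assumptions (not taken from the file), the forward pass needs the compiled DCNv2 extension from this repository, and pretrained ImageNet DLA-34 weights are downloaded on first use.

import torch
from models.networks.pose_dla_dcn import get_pose_net  # assumed import path inside src/lib

heads = {'hm': 80, 'wh': 2, 'reg': 2}                  # example CenterNet heads (80 = example class count)
model = get_pose_net(num_layers=34, heads=heads, head_conv=256, down_ratio=4)
x = torch.randn(1, 3, 512, 512)                        # dummy image batch
out = model(x)[0]                                      # the network returns a one-element list of head dicts
print({k: tuple(v.shape) for k, v in out.items()})     # every head is predicted on a 128x128 (1/4-resolution) grid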
SyNet
SyNet-master/CenterNet/src/lib/models/networks/msra_resnet.py
# Modified by Xingyi Zhou from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import torch import torch.nn as nn import torch.utils.model_zoo as model_zoo BN_MOMENTUM = 0.1 model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * self.expansion, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class PoseResNet(nn.Module): def __init__(self, block, layers, heads, head_conv, **kwargs): self.inplanes = 64 self.deconv_with_bias = False self.heads = heads super(PoseResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) # used for deconv layers self.deconv_layers = self._make_deconv_layer( 3, [256, 256, 256], [4, 4, 4], ) # self.final_layer = [] for head in sorted(self.heads): num_output = self.heads[head] if head_conv > 0: fc = nn.Sequential( nn.Conv2d(256, head_conv, kernel_size=3, padding=1, bias=True), nn.ReLU(inplace=True), nn.Conv2d(head_conv, num_output, kernel_size=1, stride=1, padding=0)) else: fc = nn.Conv2d( in_channels=256, out_channels=num_output, 
kernel_size=1, stride=1, padding=0 ) self.__setattr__(head, fc) # self.final_layer = nn.ModuleList(self.final_layer) def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def _get_deconv_cfg(self, deconv_kernel, index): if deconv_kernel == 4: padding = 1 output_padding = 0 elif deconv_kernel == 3: padding = 1 output_padding = 1 elif deconv_kernel == 2: padding = 0 output_padding = 0 return deconv_kernel, padding, output_padding def _make_deconv_layer(self, num_layers, num_filters, num_kernels): assert num_layers == len(num_filters), \ 'ERROR: num_deconv_layers is different len(num_deconv_filters)' assert num_layers == len(num_kernels), \ 'ERROR: num_deconv_layers is different len(num_deconv_filters)' layers = [] for i in range(num_layers): kernel, padding, output_padding = \ self._get_deconv_cfg(num_kernels[i], i) planes = num_filters[i] layers.append( nn.ConvTranspose2d( in_channels=self.inplanes, out_channels=planes, kernel_size=kernel, stride=2, padding=padding, output_padding=output_padding, bias=self.deconv_with_bias)) layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)) layers.append(nn.ReLU(inplace=True)) self.inplanes = planes return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.deconv_layers(x) ret = {} for head in self.heads: ret[head] = self.__getattr__(head)(x) return [ret] def init_weights(self, num_layers, pretrained=True): if pretrained: # print('=> init resnet deconv weights from normal distribution') for _, m in self.deconv_layers.named_modules(): if isinstance(m, nn.ConvTranspose2d): # print('=> init {}.weight as normal(0, 0.001)'.format(name)) # print('=> init {}.bias as 0'.format(name)) nn.init.normal_(m.weight, std=0.001) if self.deconv_with_bias: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): # print('=> init {}.weight as 1'.format(name)) # print('=> init {}.bias as 0'.format(name)) nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) # print('=> init final conv weights from normal distribution') for head in self.heads: final_layer = self.__getattr__(head) for i, m in enumerate(final_layer.modules()): if isinstance(m, nn.Conv2d): # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') # print('=> init {}.weight as normal(0, 0.001)'.format(name)) # print('=> init {}.bias as 0'.format(name)) if m.weight.shape[0] == self.heads[head]: if 'hm' in head: nn.init.constant_(m.bias, -2.19) else: nn.init.normal_(m.weight, std=0.001) nn.init.constant_(m.bias, 0) #pretrained_state_dict = torch.load(pretrained) url = model_urls['resnet{}'.format(num_layers)] pretrained_state_dict = model_zoo.load_url(url) print('=> loading pretrained model {}'.format(url)) self.load_state_dict(pretrained_state_dict, strict=False) else: print('=> imagenet pretrained model dose not exist') print('=> please download it first') raise ValueError('imagenet pretrained model does not exist') resnet_spec = {18: (BasicBlock, [2, 
2, 2, 2]), 34: (BasicBlock, [3, 4, 6, 3]), 50: (Bottleneck, [3, 4, 6, 3]), 101: (Bottleneck, [3, 4, 23, 3]), 152: (Bottleneck, [3, 8, 36, 3])} def get_pose_net(num_layers, heads, head_conv): block_class, layers = resnet_spec[num_layers] model = PoseResNet(block_class, layers, heads, head_conv=head_conv) model.init_weights(num_layers, pretrained=True) return model
10,167
35.185053
94
py
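A similar illustrative sketch for the ResNet variant above; the import path and head sizes are assumptions, and ImageNet ResNet weights are fetched on first use. The backbone downsamples by 32 and the three stride-2 deconvolution layers bring the feature map back to 1/4 resolution, so the heads share the same output grid as the DLA model.

import torch
from models.networks.msra_resnet import get_pose_net  # assumed import path

model = get_pose_net(num_layers=18, heads={'hm': 80, 'wh': 2, 'reg': 2}, head_conv=64)
x = torch.randn(1, 3, 512, 512)
out = model(x)[0]                                      # list with one dict of head outputs
print({k: tuple(v.shape) for k, v in out.items()})     # e.g. hm -> (1, 80, 128, 128)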
SyNet
SyNet-master/CenterNet/src/lib/models/networks/large_hourglass.py
# This code is base on from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import torch import torch.nn as nn class convolution(nn.Module): def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True): super(convolution, self).__init__() pad = (k - 1) // 2 self.conv = nn.Conv2d(inp_dim, out_dim, (k, k), padding=(pad, pad), stride=(stride, stride), bias=not with_bn) self.bn = nn.BatchNorm2d(out_dim) if with_bn else nn.Sequential() self.relu = nn.ReLU(inplace=True) def forward(self, x): conv = self.conv(x) bn = self.bn(conv) relu = self.relu(bn) return relu class fully_connected(nn.Module): def __init__(self, inp_dim, out_dim, with_bn=True): super(fully_connected, self).__init__() self.with_bn = with_bn self.linear = nn.Linear(inp_dim, out_dim) if self.with_bn: self.bn = nn.BatchNorm1d(out_dim) self.relu = nn.ReLU(inplace=True) def forward(self, x): linear = self.linear(x) bn = self.bn(linear) if self.with_bn else linear relu = self.relu(bn) return relu class residual(nn.Module): def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True): super(residual, self).__init__() self.conv1 = nn.Conv2d(inp_dim, out_dim, (3, 3), padding=(1, 1), stride=(stride, stride), bias=False) self.bn1 = nn.BatchNorm2d(out_dim) self.relu1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(out_dim, out_dim, (3, 3), padding=(1, 1), bias=False) self.bn2 = nn.BatchNorm2d(out_dim) self.skip = nn.Sequential( nn.Conv2d(inp_dim, out_dim, (1, 1), stride=(stride, stride), bias=False), nn.BatchNorm2d(out_dim) ) if stride != 1 or inp_dim != out_dim else nn.Sequential() self.relu = nn.ReLU(inplace=True) def forward(self, x): conv1 = self.conv1(x) bn1 = self.bn1(conv1) relu1 = self.relu1(bn1) conv2 = self.conv2(relu1) bn2 = self.bn2(conv2) skip = self.skip(x) return self.relu(bn2 + skip) def make_layer(k, inp_dim, out_dim, modules, layer=convolution, **kwargs): layers = [layer(k, inp_dim, out_dim, **kwargs)] for _ in range(1, modules): layers.append(layer(k, out_dim, out_dim, **kwargs)) return nn.Sequential(*layers) def make_layer_revr(k, inp_dim, out_dim, modules, layer=convolution, **kwargs): layers = [] for _ in range(modules - 1): layers.append(layer(k, inp_dim, inp_dim, **kwargs)) layers.append(layer(k, inp_dim, out_dim, **kwargs)) return nn.Sequential(*layers) class MergeUp(nn.Module): def forward(self, up1, up2): return up1 + up2 def make_merge_layer(dim): return MergeUp() # def make_pool_layer(dim): # return nn.MaxPool2d(kernel_size=2, stride=2) def make_pool_layer(dim): return nn.Sequential() def make_unpool_layer(dim): return nn.Upsample(scale_factor=2) def make_kp_layer(cnv_dim, curr_dim, out_dim): return nn.Sequential( convolution(3, cnv_dim, curr_dim, with_bn=False), nn.Conv2d(curr_dim, out_dim, (1, 1)) ) def make_inter_layer(dim): return residual(3, dim, dim) def make_cnv_layer(inp_dim, out_dim): return convolution(3, inp_dim, out_dim) class kp_module(nn.Module): def __init__( self, n, dims, modules, layer=residual, make_up_layer=make_layer, make_low_layer=make_layer, make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr, make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer, make_merge_layer=make_merge_layer, **kwargs ): super(kp_module, self).__init__() self.n = n curr_mod = modules[0] next_mod = modules[1] curr_dim = dims[0] next_dim = dims[1] self.up1 = make_up_layer( 3, curr_dim, curr_dim, curr_mod, layer=layer, **kwargs ) self.max1 = make_pool_layer(curr_dim) self.low1 = make_hg_layer( 3, curr_dim, next_dim, 
curr_mod, layer=layer, **kwargs ) self.low2 = kp_module( n - 1, dims[1:], modules[1:], layer=layer, make_up_layer=make_up_layer, make_low_layer=make_low_layer, make_hg_layer=make_hg_layer, make_hg_layer_revr=make_hg_layer_revr, make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer, make_merge_layer=make_merge_layer, **kwargs ) if self.n > 1 else \ make_low_layer( 3, next_dim, next_dim, next_mod, layer=layer, **kwargs ) self.low3 = make_hg_layer_revr( 3, next_dim, curr_dim, curr_mod, layer=layer, **kwargs ) self.up2 = make_unpool_layer(curr_dim) self.merge = make_merge_layer(curr_dim) def forward(self, x): up1 = self.up1(x) max1 = self.max1(x) low1 = self.low1(max1) low2 = self.low2(low1) low3 = self.low3(low2) up2 = self.up2(low3) return self.merge(up1, up2) class exkp(nn.Module): def __init__( self, n, nstack, dims, modules, heads, pre=None, cnv_dim=256, make_tl_layer=None, make_br_layer=None, make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer, make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer, make_up_layer=make_layer, make_low_layer=make_layer, make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr, make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer, make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer, kp_layer=residual ): super(exkp, self).__init__() self.nstack = nstack self.heads = heads curr_dim = dims[0] self.pre = nn.Sequential( convolution(7, 3, 128, stride=2), residual(3, 128, 256, stride=2) ) if pre is None else pre self.kps = nn.ModuleList([ kp_module( n, dims, modules, layer=kp_layer, make_up_layer=make_up_layer, make_low_layer=make_low_layer, make_hg_layer=make_hg_layer, make_hg_layer_revr=make_hg_layer_revr, make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer, make_merge_layer=make_merge_layer ) for _ in range(nstack) ]) self.cnvs = nn.ModuleList([ make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack) ]) self.inters = nn.ModuleList([ make_inter_layer(curr_dim) for _ in range(nstack - 1) ]) self.inters_ = nn.ModuleList([ nn.Sequential( nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False), nn.BatchNorm2d(curr_dim) ) for _ in range(nstack - 1) ]) self.cnvs_ = nn.ModuleList([ nn.Sequential( nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False), nn.BatchNorm2d(curr_dim) ) for _ in range(nstack - 1) ]) ## keypoint heatmaps for head in heads.keys(): if 'hm' in head: module = nn.ModuleList([ make_heat_layer( cnv_dim, curr_dim, heads[head]) for _ in range(nstack) ]) self.__setattr__(head, module) for heat in self.__getattr__(head): heat[-1].bias.data.fill_(-2.19) else: module = nn.ModuleList([ make_regr_layer( cnv_dim, curr_dim, heads[head]) for _ in range(nstack) ]) self.__setattr__(head, module) self.relu = nn.ReLU(inplace=True) def forward(self, image): # print('image shape', image.shape) inter = self.pre(image) outs = [] for ind in range(self.nstack): kp_, cnv_ = self.kps[ind], self.cnvs[ind] kp = kp_(inter) cnv = cnv_(kp) out = {} for head in self.heads: layer = self.__getattr__(head)[ind] y = layer(cnv) out[head] = y outs.append(out) if ind < self.nstack - 1: inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv) inter = self.relu(inter) inter = self.inters[ind](inter) return outs def make_hg_layer(kernel, dim0, dim1, mod, layer=convolution, **kwargs): layers = [layer(kernel, dim0, dim1, stride=2)] layers += [layer(kernel, dim1, dim1) for _ in range(mod - 1)] return nn.Sequential(*layers) class HourglassNet(exkp): def __init__(self, heads, num_stacks=2): n = 5 dims = [256, 256, 384, 384, 
384, 512] modules = [2, 2, 2, 2, 2, 4] super(HourglassNet, self).__init__( n, num_stacks, dims, modules, heads, make_tl_layer=None, make_br_layer=None, make_pool_layer=make_pool_layer, make_hg_layer=make_hg_layer, kp_layer=residual, cnv_dim=256 ) def get_large_hourglass_net(num_layers, heads, head_conv): model = HourglassNet(heads, 2) return model
9,942
32.033223
118
py
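For the stacked hourglass above, the forward pass returns one dict of head outputs per stack, which is what the trainers use for intermediate supervision. Illustrative sketch only; the import path and head sizes are assumptions, and note that num_layers and head_conv are accepted but unused by this constructor.

import torch
from models.networks.large_hourglass import get_large_hourglass_net  # assumed import path

model = get_large_hourglass_net(num_layers=104, heads={'hm': 80, 'wh': 2, 'reg': 2}, head_conv=64)
x = torch.randn(1, 3, 512, 512)
outs = model(x)                                        # list of length nstack (2 stacks here)
print(len(outs), {k: tuple(v.shape) for k, v in outs[0].items()})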
SyNet
SyNet-master/CenterNet/src/lib/models/networks/dlav0.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from os.path import join import torch from torch import nn import torch.utils.model_zoo as model_zoo import numpy as np BatchNorm = nn.BatchNorm2d def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'): return join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash)) def conv3x3(in_planes, out_planes, stride=1): "3x3 convolution with padding" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): def __init__(self, inplanes, planes, stride=1, dilation=1): super(BasicBlock, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) self.bn1 = BatchNorm(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=dilation, bias=False, dilation=dilation) self.bn2 = BatchNorm(planes) self.stride = stride def forward(self, x, residual=None): if residual is None: residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 2 def __init__(self, inplanes, planes, stride=1, dilation=1): super(Bottleneck, self).__init__() expansion = Bottleneck.expansion bottle_planes = planes // expansion self.conv1 = nn.Conv2d(inplanes, bottle_planes, kernel_size=1, bias=False) self.bn1 = BatchNorm(bottle_planes) self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) self.bn2 = BatchNorm(bottle_planes) self.conv3 = nn.Conv2d(bottle_planes, planes, kernel_size=1, bias=False) self.bn3 = BatchNorm(planes) self.relu = nn.ReLU(inplace=True) self.stride = stride def forward(self, x, residual=None): if residual is None: residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) out += residual out = self.relu(out) return out class BottleneckX(nn.Module): expansion = 2 cardinality = 32 def __init__(self, inplanes, planes, stride=1, dilation=1): super(BottleneckX, self).__init__() cardinality = BottleneckX.cardinality # dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0))) # bottle_planes = dim * cardinality bottle_planes = planes * cardinality // 32 self.conv1 = nn.Conv2d(inplanes, bottle_planes, kernel_size=1, bias=False) self.bn1 = BatchNorm(bottle_planes) self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation, groups=cardinality) self.bn2 = BatchNorm(bottle_planes) self.conv3 = nn.Conv2d(bottle_planes, planes, kernel_size=1, bias=False) self.bn3 = BatchNorm(planes) self.relu = nn.ReLU(inplace=True) self.stride = stride def forward(self, x, residual=None): if residual is None: residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) out += residual out = self.relu(out) return out class Root(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, residual): super(Root, self).__init__() self.conv = nn.Conv2d( in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2) self.bn = BatchNorm(out_channels) 
self.relu = nn.ReLU(inplace=True) self.residual = residual def forward(self, *x): children = x x = self.conv(torch.cat(x, 1)) x = self.bn(x) if self.residual: x += children[0] x = self.relu(x) return x class Tree(nn.Module): def __init__(self, levels, block, in_channels, out_channels, stride=1, level_root=False, root_dim=0, root_kernel_size=1, dilation=1, root_residual=False): super(Tree, self).__init__() if root_dim == 0: root_dim = 2 * out_channels if level_root: root_dim += in_channels if levels == 1: self.tree1 = block(in_channels, out_channels, stride, dilation=dilation) self.tree2 = block(out_channels, out_channels, 1, dilation=dilation) else: self.tree1 = Tree(levels - 1, block, in_channels, out_channels, stride, root_dim=0, root_kernel_size=root_kernel_size, dilation=dilation, root_residual=root_residual) self.tree2 = Tree(levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, root_kernel_size=root_kernel_size, dilation=dilation, root_residual=root_residual) if levels == 1: self.root = Root(root_dim, out_channels, root_kernel_size, root_residual) self.level_root = level_root self.root_dim = root_dim self.downsample = None self.project = None self.levels = levels if stride > 1: self.downsample = nn.MaxPool2d(stride, stride=stride) if in_channels != out_channels: self.project = nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), BatchNorm(out_channels) ) def forward(self, x, residual=None, children=None): children = [] if children is None else children bottom = self.downsample(x) if self.downsample else x residual = self.project(bottom) if self.project else bottom if self.level_root: children.append(bottom) x1 = self.tree1(x, residual) if self.levels == 1: x2 = self.tree2(x1) x = self.root(x2, x1, *children) else: children.append(x1) x = self.tree2(x1, children=children) return x class DLA(nn.Module): def __init__(self, levels, channels, num_classes=1000, block=BasicBlock, residual_root=False, return_levels=False, pool_size=7, linear_root=False): super(DLA, self).__init__() self.channels = channels self.return_levels = return_levels self.num_classes = num_classes self.base_layer = nn.Sequential( nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3, bias=False), BatchNorm(channels[0]), nn.ReLU(inplace=True)) self.level0 = self._make_conv_level( channels[0], channels[0], levels[0]) self.level1 = self._make_conv_level( channels[0], channels[1], levels[1], stride=2) self.level2 = Tree(levels[2], block, channels[1], channels[2], 2, level_root=False, root_residual=residual_root) self.level3 = Tree(levels[3], block, channels[2], channels[3], 2, level_root=True, root_residual=residual_root) self.level4 = Tree(levels[4], block, channels[3], channels[4], 2, level_root=True, root_residual=residual_root) self.level5 = Tree(levels[5], block, channels[4], channels[5], 2, level_root=True, root_residual=residual_root) self.avgpool = nn.AvgPool2d(pool_size) self.fc = nn.Conv2d(channels[-1], num_classes, kernel_size=1, stride=1, padding=0, bias=True) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, BatchNorm): m.weight.data.fill_(1) m.bias.data.zero_() def _make_level(self, block, inplanes, planes, blocks, stride=1): downsample = None if stride != 1 or inplanes != planes: downsample = nn.Sequential( nn.MaxPool2d(stride, stride=stride), nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False), BatchNorm(planes), ) layers = [] layers.append(block(inplanes, planes, stride, downsample=downsample)) for i in range(1, blocks): layers.append(block(inplanes, planes)) return nn.Sequential(*layers) def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): modules = [] for i in range(convs): modules.extend([ nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1, padding=dilation, bias=False, dilation=dilation), BatchNorm(planes), nn.ReLU(inplace=True)]) inplanes = planes return nn.Sequential(*modules) def forward(self, x): y = [] x = self.base_layer(x) for i in range(6): x = getattr(self, 'level{}'.format(i))(x) y.append(x) if self.return_levels: return y else: x = self.avgpool(x) x = self.fc(x) x = x.view(x.size(0), -1) return x def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'): fc = self.fc if name.endswith('.pth'): model_weights = torch.load(data + name) else: model_url = get_model_url(data, name, hash) model_weights = model_zoo.load_url(model_url) num_classes = len(model_weights[list(model_weights.keys())[-1]]) self.fc = nn.Conv2d( self.channels[-1], num_classes, kernel_size=1, stride=1, padding=0, bias=True) self.load_state_dict(model_weights) self.fc = fc def dla34(pretrained, **kwargs): # DLA-34 model = DLA([1, 1, 1, 2, 2, 1], [16, 32, 64, 128, 256, 512], block=BasicBlock, **kwargs) if pretrained: model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86') return model def dla46_c(pretrained=None, **kwargs): # DLA-46-C Bottleneck.expansion = 2 model = DLA([1, 1, 1, 2, 2, 1], [16, 32, 64, 64, 128, 256], block=Bottleneck, **kwargs) if pretrained is not None: model.load_pretrained_model(pretrained, 'dla46_c') return model def dla46x_c(pretrained=None, **kwargs): # DLA-X-46-C BottleneckX.expansion = 2 model = DLA([1, 1, 1, 2, 2, 1], [16, 32, 64, 64, 128, 256], block=BottleneckX, **kwargs) if pretrained is not None: model.load_pretrained_model(pretrained, 'dla46x_c') return model def dla60x_c(pretrained, **kwargs): # DLA-X-60-C BottleneckX.expansion = 2 model = DLA([1, 1, 1, 2, 3, 1], [16, 32, 64, 64, 128, 256], block=BottleneckX, **kwargs) if pretrained: model.load_pretrained_model(data='imagenet', name='dla60x_c', hash='b870c45c') return model def dla60(pretrained=None, **kwargs): # DLA-60 Bottleneck.expansion = 2 model = DLA([1, 1, 1, 2, 3, 1], [16, 32, 128, 256, 512, 1024], block=Bottleneck, **kwargs) if pretrained is not None: model.load_pretrained_model(pretrained, 'dla60') return model def dla60x(pretrained=None, **kwargs): # DLA-X-60 BottleneckX.expansion = 2 model = DLA([1, 1, 1, 2, 3, 1], [16, 32, 128, 256, 512, 1024], block=BottleneckX, **kwargs) if pretrained is not None: model.load_pretrained_model(pretrained, 'dla60x') return model def dla102(pretrained=None, **kwargs): # DLA-102 Bottleneck.expansion = 2 model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024], block=Bottleneck, residual_root=True, **kwargs) if pretrained is not None: model.load_pretrained_model(pretrained, 'dla102') return model def dla102x(pretrained=None, **kwargs): # DLA-X-102 BottleneckX.expansion = 2 model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024], block=BottleneckX, 
residual_root=True, **kwargs) if pretrained is not None: model.load_pretrained_model(pretrained, 'dla102x') return model def dla102x2(pretrained=None, **kwargs): # DLA-X-102 64 BottleneckX.cardinality = 64 model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024], block=BottleneckX, residual_root=True, **kwargs) if pretrained is not None: model.load_pretrained_model(pretrained, 'dla102x2') return model def dla169(pretrained=None, **kwargs): # DLA-169 Bottleneck.expansion = 2 model = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024], block=Bottleneck, residual_root=True, **kwargs) if pretrained is not None: model.load_pretrained_model(pretrained, 'dla169') return model def set_bn(bn): global BatchNorm BatchNorm = bn dla.BatchNorm = bn class Identity(nn.Module): def __init__(self): super(Identity, self).__init__() def forward(self, x): return x def fill_up_weights(up): w = up.weight.data f = math.ceil(w.size(2) / 2) c = (2 * f - 1 - f % 2) / (2. * f) for i in range(w.size(2)): for j in range(w.size(3)): w[0, 0, i, j] = \ (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c)) for c in range(1, w.size(0)): w[c, 0, :, :] = w[0, 0, :, :] class IDAUp(nn.Module): def __init__(self, node_kernel, out_dim, channels, up_factors): super(IDAUp, self).__init__() self.channels = channels self.out_dim = out_dim for i, c in enumerate(channels): if c == out_dim: proj = Identity() else: proj = nn.Sequential( nn.Conv2d(c, out_dim, kernel_size=1, stride=1, bias=False), BatchNorm(out_dim), nn.ReLU(inplace=True)) f = int(up_factors[i]) if f == 1: up = Identity() else: up = nn.ConvTranspose2d( out_dim, out_dim, f * 2, stride=f, padding=f // 2, output_padding=0, groups=out_dim, bias=False) fill_up_weights(up) setattr(self, 'proj_' + str(i), proj) setattr(self, 'up_' + str(i), up) for i in range(1, len(channels)): node = nn.Sequential( nn.Conv2d(out_dim * 2, out_dim, kernel_size=node_kernel, stride=1, padding=node_kernel // 2, bias=False), BatchNorm(out_dim), nn.ReLU(inplace=True)) setattr(self, 'node_' + str(i), node) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, BatchNorm): m.weight.data.fill_(1) m.bias.data.zero_() def forward(self, layers): assert len(self.channels) == len(layers), \ '{} vs {} layers'.format(len(self.channels), len(layers)) layers = list(layers) for i, l in enumerate(layers): upsample = getattr(self, 'up_' + str(i)) project = getattr(self, 'proj_' + str(i)) layers[i] = upsample(project(l)) x = layers[0] y = [] for i in range(1, len(layers)): node = getattr(self, 'node_' + str(i)) x = node(torch.cat([x, layers[i]], 1)) y.append(x) return x, y class DLAUp(nn.Module): def __init__(self, channels, scales=(1, 2, 4, 8, 16), in_channels=None): super(DLAUp, self).__init__() if in_channels is None: in_channels = channels self.channels = channels channels = list(channels) scales = np.array(scales, dtype=int) for i in range(len(channels) - 1): j = -i - 2 setattr(self, 'ida_{}'.format(i), IDAUp(3, channels[j], in_channels[j:], scales[j:] // scales[j])) scales[j + 1:] = scales[j] in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]] def forward(self, layers): layers = list(layers) assert len(layers) > 1 for i in range(len(layers) - 1): ida = getattr(self, 'ida_{}'.format(i)) x, y = ida(layers[-i - 2:]) layers[-i - 1:] = y return x def fill_fc_weights(layers): for m in layers.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight, std=0.001) # torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu') # torch.nn.init.xavier_normal_(m.weight.data) if m.bias is not None: nn.init.constant_(m.bias, 0) class DLASeg(nn.Module): def __init__(self, base_name, heads, pretrained=True, down_ratio=4, head_conv=256): super(DLASeg, self).__init__() assert down_ratio in [2, 4, 8, 16] self.heads = heads self.first_level = int(np.log2(down_ratio)) self.base = globals()[base_name]( pretrained=pretrained, return_levels=True) channels = self.base.channels scales = [2 ** i for i in range(len(channels[self.first_level:]))] self.dla_up = DLAUp(channels[self.first_level:], scales=scales) ''' self.fc = nn.Sequential( nn.Conv2d(channels[self.first_level], classes, kernel_size=1, stride=1, padding=0, bias=True) ) ''' for head in self.heads: classes = self.heads[head] if head_conv > 0: fc = nn.Sequential( nn.Conv2d(channels[self.first_level], head_conv, kernel_size=3, padding=1, bias=True), nn.ReLU(inplace=True), nn.Conv2d(head_conv, classes, kernel_size=1, stride=1, padding=0, bias=True)) if 'hm' in head: fc[-1].bias.data.fill_(-2.19) else: fill_fc_weights(fc) else: fc = nn.Conv2d(channels[self.first_level], classes, kernel_size=1, stride=1, padding=0, bias=True) if 'hm' in head: fc.bias.data.fill_(-2.19) else: fill_fc_weights(fc) self.__setattr__(head, fc) ''' up_factor = 2 ** self.first_level if up_factor > 1: up = nn.ConvTranspose2d(classes, classes, up_factor * 2, stride=up_factor, padding=up_factor // 2, output_padding=0, groups=classes, bias=False) fill_up_weights(up) up.weight.requires_grad = False else: up = Identity() self.up = up self.softmax = nn.LogSoftmax(dim=1) for m in self.fc.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, BatchNorm): m.weight.data.fill_(1) m.bias.data.zero_() ''' def forward(self, x): x = self.base(x) x = self.dla_up(x[self.first_level:]) # x = self.fc(x) # y = self.softmax(self.up(x)) ret = {} for head in self.heads: ret[head] = self.__getattr__(head)(x) return [ret] ''' def optim_parameters(self, memo=None): for param in self.base.parameters(): yield param for param in self.dla_up.parameters(): yield param for param in self.fc.parameters(): yield param ''' ''' def dla34up(classes, pretrained_base=None, **kwargs): model = DLASeg('dla34', classes, pretrained_base=pretrained_base, **kwargs) return model def dla60up(classes, pretrained_base=None, **kwargs): model = DLASeg('dla60', classes, pretrained_base=pretrained_base, **kwargs) return model def dla102up(classes, pretrained_base=None, **kwargs): model = DLASeg('dla102', classes, pretrained_base=pretrained_base, **kwargs) return model def dla169up(classes, pretrained_base=None, **kwargs): model = DLASeg('dla169', classes, pretrained_base=pretrained_base, **kwargs) return model ''' def get_pose_net(num_layers, heads, head_conv=256, down_ratio=4): model = DLASeg('dla{}'.format(num_layers), heads, pretrained=True, down_ratio=down_ratio, head_conv=head_conv) return model
22,682
34.00463
86
py
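A small illustrative check of the fill_up_weights helper defined above: it fills a transposed-convolution kernel with fixed bilinear-interpolation weights, so the DLA upsampling layers start out as plain bilinear upsampling before any fine-tuning. The import path is an assumption.

import torch
from torch import nn
from models.networks.dlav0 import fill_up_weights  # assumed import path

up = nn.ConvTranspose2d(1, 1, 4, stride=2, padding=1, bias=False)
fill_up_weights(up)                                   # writes the bilinear kernel into up.weight
x = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
print(up.weight.data[0, 0])                           # symmetric 4x4 bilinear kernel
print(up(x).shape)                                    # torch.Size([1, 1, 8, 8]) -- 2x upsampling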
SyNet
SyNet-master/CenterNet/src/lib/models/networks/DCNv2/setup.py
import os import glob import torch from torch.utils.cpp_extension import CUDA_HOME from torch.utils.cpp_extension import CppExtension from torch.utils.cpp_extension import CUDAExtension from setuptools import find_packages from setuptools import setup requirements = ["torch", "torchvision"] def get_extensions(): this_dir = os.path.dirname(os.path.abspath(__file__)) extensions_dir = os.path.join(this_dir, "src") main_file = glob.glob(os.path.join(extensions_dir, "*.cpp")) source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp")) source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu")) os.environ["CC"] = "g++" sources = main_file + source_cpu extension = CppExtension extra_compile_args = {"cxx": []} define_macros = [] if torch.cuda.is_available() and CUDA_HOME is not None: extension = CUDAExtension sources += source_cuda define_macros += [("WITH_CUDA", None)] extra_compile_args["nvcc"] = [ "-DCUDA_HAS_FP16=1", "-D__CUDA_NO_HALF_OPERATORS__", "-D__CUDA_NO_HALF_CONVERSIONS__", "-D__CUDA_NO_HALF2_OPERATORS__", ] else: #raise NotImplementedError('Cuda is not available') pass sources = [os.path.join(extensions_dir, s) for s in sources] include_dirs = [extensions_dir] ext_modules = [ extension( "_ext", sources, include_dirs=include_dirs, define_macros=define_macros, extra_compile_args=extra_compile_args, ) ] return ext_modules setup( name="DCNv2", version="0.1", author="charlesshang", url="https://github.com/charlesshang/DCNv2", description="deformable convolutional networks", packages=find_packages(exclude=("configs", "tests",)), # install_requires=requirements, ext_modules=get_extensions(), cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension}, )
2,035
27.676056
73
py
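The setup script above only adds the CUDA sources (and the WITH_CUDA macro) when both a CUDA runtime and CUDA_HOME are available, falling back to a CPU-only CppExtension otherwise. A quick, illustrative way to see which branch a given machine would take:

import torch
from torch.utils.cpp_extension import CUDA_HOME

builds_cuda_kernels = torch.cuda.is_available() and CUDA_HOME is not None
print('CUDA_HOME:', CUDA_HOME)
print('DCNv2 would include CUDA kernels:', builds_cuda_kernels)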
SyNet
SyNet-master/CenterNet/src/lib/trains/train_factory.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function from .ctdet import CtdetTrainer from .ddd import DddTrainer from .exdet import ExdetTrainer from .multi_pose import MultiPoseTrainer train_factory = { 'exdet': ExdetTrainer, 'ddd': DddTrainer, 'ctdet': CtdetTrainer, 'multi_pose': MultiPoseTrainer, }
371
22.25
40
py
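The factory above is a plain task-name to trainer-class mapping, so picking a trainer elsewhere in the code base reduces to a dictionary lookup (import path assumed):

from trains.train_factory import train_factory  # assumed import path

TrainerClass = train_factory['ctdet']            # -> CtdetTrainer
print(TrainerClass.__name__)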
SyNet
SyNet-master/CenterNet/src/lib/trains/exdet.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch import numpy as np import cv2 import sys import time from utils.debugger import Debugger from models.data_parallel import DataParallel from models.losses import FocalLoss, RegL1Loss from models.decode import agnex_ct_decode, exct_decode from models.utils import _sigmoid from .base_trainer import BaseTrainer class ExdetLoss(torch.nn.Module): def __init__(self, opt): super(ExdetLoss, self).__init__() self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss() self.crit_reg = RegL1Loss() self.opt = opt self.parts = ['t', 'l', 'b', 'r', 'c'] def forward(self, outputs, batch): opt = self.opt hm_loss, reg_loss = 0, 0 for s in range(opt.num_stacks): output = outputs[s] for p in self.parts: tag = 'hm_{}'.format(p) output[tag] = _sigmoid(output[tag]) hm_loss += self.crit(output[tag], batch[tag]) / opt.num_stacks if p != 'c' and opt.reg_offset and opt.off_weight > 0: reg_loss += self.crit_reg(output['reg_{}'.format(p)], batch['reg_mask'], batch['ind_{}'.format(p)], batch['reg_{}'.format(p)]) / opt.num_stacks loss = opt.hm_weight * hm_loss + opt.off_weight * reg_loss loss_stats = {'loss': loss, 'off_loss': reg_loss, 'hm_loss': hm_loss} return loss, loss_stats class ExdetTrainer(BaseTrainer): def __init__(self, opt, model, optimizer=None): super(ExdetTrainer, self).__init__(opt, model, optimizer=optimizer) self.decode = agnex_ct_decode if opt.agnostic_ex else exct_decode def _get_losses(self, opt): loss_states = ['loss', 'hm_loss', 'off_loss'] loss = ExdetLoss(opt) return loss_states, loss def debug(self, batch, output, iter_id): opt = self.opt detections = self.decode(output['hm_t'], output['hm_l'], output['hm_b'], output['hm_r'], output['hm_c']).detach().cpu().numpy() detections[:, :, :4] *= opt.input_res / opt.output_res for i in range(1): debugger = Debugger( dataset=opt.dataset, ipynb=(opt.debug==3), theme=opt.debugger_theme) pred_hm = np.zeros((opt.input_res, opt.input_res, 3), dtype=np.uint8) gt_hm = np.zeros((opt.input_res, opt.input_res, 3), dtype=np.uint8) img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0) img = ((img * self.opt.std + self.opt.mean) * 255.).astype(np.uint8) for p in self.parts: tag = 'hm_{}'.format(p) pred = debugger.gen_colormap(output[tag][i].detach().cpu().numpy()) gt = debugger.gen_colormap(batch[tag][i].detach().cpu().numpy()) if p != 'c': pred_hm = np.maximum(pred_hm, pred) gt_hm = np.maximum(gt_hm, gt) if p == 'c' or opt.debug > 2: debugger.add_blend_img(img, pred, 'pred_{}'.format(p)) debugger.add_blend_img(img, gt, 'gt_{}'.format(p)) debugger.add_blend_img(img, pred_hm, 'pred') debugger.add_blend_img(img, gt_hm, 'gt') debugger.add_img(img, img_id='out') for k in range(len(detections[i])): if detections[i, k, 4] > 0.1: debugger.add_coco_bbox(detections[i, k, :4], detections[i, k, -1], detections[i, k, 4], img_id='out') if opt.debug == 4: debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id)) else: debugger.show_all_imgs(pause=True)
3,605
40.930233
79
py
SyNet
SyNet-master/CenterNet/src/lib/trains/ctdet.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch import numpy as np from models.losses import FocalLoss from models.losses import RegL1Loss, RegLoss, NormRegL1Loss, RegWeightedL1Loss from models.decode import ctdet_decode from models.utils import _sigmoid from utils.debugger import Debugger from utils.post_process import ctdet_post_process from utils.oracle_utils import gen_oracle_map from .base_trainer import BaseTrainer class CtdetLoss(torch.nn.Module): def __init__(self, opt): super(CtdetLoss, self).__init__() self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss() self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \ RegLoss() if opt.reg_loss == 'sl1' else None self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \ NormRegL1Loss() if opt.norm_wh else \ RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg self.opt = opt def forward(self, outputs, batch): opt = self.opt hm_loss, wh_loss, off_loss = 0, 0, 0 for s in range(opt.num_stacks): output = outputs[s] if not opt.mse_loss: output['hm'] = _sigmoid(output['hm']) if opt.eval_oracle_hm: output['hm'] = batch['hm'] if opt.eval_oracle_wh: output['wh'] = torch.from_numpy(gen_oracle_map( batch['wh'].detach().cpu().numpy(), batch['ind'].detach().cpu().numpy(), output['wh'].shape[3], output['wh'].shape[2])).to(opt.device) if opt.eval_oracle_offset: output['reg'] = torch.from_numpy(gen_oracle_map( batch['reg'].detach().cpu().numpy(), batch['ind'].detach().cpu().numpy(), output['reg'].shape[3], output['reg'].shape[2])).to(opt.device) hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks if opt.wh_weight > 0: if opt.dense_wh: mask_weight = batch['dense_wh_mask'].sum() + 1e-4 wh_loss += ( self.crit_wh(output['wh'] * batch['dense_wh_mask'], batch['dense_wh'] * batch['dense_wh_mask']) / mask_weight) / opt.num_stacks elif opt.cat_spec_wh: wh_loss += self.crit_wh( output['wh'], batch['cat_spec_mask'], batch['ind'], batch['cat_spec_wh']) / opt.num_stacks else: wh_loss += self.crit_reg( output['wh'], batch['reg_mask'], batch['ind'], batch['wh']) / opt.num_stacks if opt.reg_offset and opt.off_weight > 0: off_loss += self.crit_reg(output['reg'], batch['reg_mask'], batch['ind'], batch['reg']) / opt.num_stacks loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \ opt.off_weight * off_loss loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'wh_loss': wh_loss, 'off_loss': off_loss} return loss, loss_stats class CtdetTrainer(BaseTrainer): def __init__(self, opt, model, optimizer=None): super(CtdetTrainer, self).__init__(opt, model, optimizer=optimizer) def _get_losses(self, opt): loss_states = ['loss', 'hm_loss', 'wh_loss', 'off_loss'] loss = CtdetLoss(opt) return loss_states, loss def debug(self, batch, output, iter_id): opt = self.opt reg = output['reg'] if opt.reg_offset else None dets = ctdet_decode( output['hm'], output['wh'], reg=reg, cat_spec_wh=opt.cat_spec_wh, K=opt.K) dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2]) dets[:, :, :4] *= opt.down_ratio dets_gt = batch['meta']['gt_det'].numpy().reshape(1, -1, dets.shape[2]) dets_gt[:, :, :4] *= opt.down_ratio for i in range(1): debugger = Debugger( dataset=opt.dataset, ipynb=(opt.debug==3), theme=opt.debugger_theme) img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0) img = np.clip((( img * opt.std + opt.mean) * 255.), 0, 255).astype(np.uint8) pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy()) gt = 
debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy()) debugger.add_blend_img(img, pred, 'pred_hm') debugger.add_blend_img(img, gt, 'gt_hm') debugger.add_img(img, img_id='out_pred') for k in range(len(dets[i])): if dets[i, k, 4] > opt.center_thresh: debugger.add_coco_bbox(dets[i, k, :4], dets[i, k, -1], dets[i, k, 4], img_id='out_pred') debugger.add_img(img, img_id='out_gt') for k in range(len(dets_gt[i])): if dets_gt[i, k, 4] > opt.center_thresh: debugger.add_coco_bbox(dets_gt[i, k, :4], dets_gt[i, k, -1], dets_gt[i, k, 4], img_id='out_gt') if opt.debug == 4: debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id)) else: debugger.show_all_imgs(pause=True) def save_result(self, output, batch, results): reg = output['reg'] if self.opt.reg_offset else None dets = ctdet_decode( output['hm'], output['wh'], reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K) dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2]) dets_out = ctdet_post_process( dets.copy(), batch['meta']['c'].cpu().numpy(), batch['meta']['s'].cpu().numpy(), output['hm'].shape[2], output['hm'].shape[3], output['hm'].shape[1]) results[batch['meta']['img_id'].cpu().numpy()[0]] = dets_out[0]
5,518
40.810606
78
py
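In CtdetTrainer above, both debug() and save_result() rely on ctdet_decode to turn the raw head maps into top-K detections, each row holding [x1, y1, x2, y2, score, class] on the 1/4-resolution grid. Illustrative sketch with dummy tensors; the import path is an assumption.

import torch
from models.decode import ctdet_decode  # assumed import path

hm = torch.rand(1, 80, 128, 128)   # dummy class heatmaps (80 = example class count)
wh = torch.rand(1, 2, 128, 128)    # dummy width/height head
reg = torch.rand(1, 2, 128, 128)   # dummy sub-pixel offset head
dets = ctdet_decode(hm, wh, reg=reg, K=100)
print(dets.shape)                  # torch.Size([1, 100, 6])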
SyNet
SyNet-master/CenterNet/src/lib/trains/ddd.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch import numpy as np from models.losses import FocalLoss, L1Loss, BinRotLoss from models.decode import ddd_decode from models.utils import _sigmoid from utils.debugger import Debugger from utils.post_process import ddd_post_process from utils.oracle_utils import gen_oracle_map from .base_trainer import BaseTrainer class DddLoss(torch.nn.Module): def __init__(self, opt): super(DddLoss, self).__init__() self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss() self.crit_reg = L1Loss() self.crit_rot = BinRotLoss() self.opt = opt def forward(self, outputs, batch): opt = self.opt hm_loss, dep_loss, rot_loss, dim_loss = 0, 0, 0, 0 wh_loss, off_loss = 0, 0 for s in range(opt.num_stacks): output = outputs[s] output['hm'] = _sigmoid(output['hm']) output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1. if opt.eval_oracle_dep: output['dep'] = torch.from_numpy(gen_oracle_map( batch['dep'].detach().cpu().numpy(), batch['ind'].detach().cpu().numpy(), opt.output_w, opt.output_h)).to(opt.device) hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks if opt.dep_weight > 0: dep_loss += self.crit_reg(output['dep'], batch['reg_mask'], batch['ind'], batch['dep']) / opt.num_stacks if opt.dim_weight > 0: dim_loss += self.crit_reg(output['dim'], batch['reg_mask'], batch['ind'], batch['dim']) / opt.num_stacks if opt.rot_weight > 0: rot_loss += self.crit_rot(output['rot'], batch['rot_mask'], batch['ind'], batch['rotbin'], batch['rotres']) / opt.num_stacks if opt.reg_bbox and opt.wh_weight > 0: wh_loss += self.crit_reg(output['wh'], batch['rot_mask'], batch['ind'], batch['wh']) / opt.num_stacks if opt.reg_offset and opt.off_weight > 0: off_loss += self.crit_reg(output['reg'], batch['rot_mask'], batch['ind'], batch['reg']) / opt.num_stacks loss = opt.hm_weight * hm_loss + opt.dep_weight * dep_loss + \ opt.dim_weight * dim_loss + opt.rot_weight * rot_loss + \ opt.wh_weight * wh_loss + opt.off_weight * off_loss loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'dep_loss': dep_loss, 'dim_loss': dim_loss, 'rot_loss': rot_loss, 'wh_loss': wh_loss, 'off_loss': off_loss} return loss, loss_stats class DddTrainer(BaseTrainer): def __init__(self, opt, model, optimizer=None): super(DddTrainer, self).__init__(opt, model, optimizer=optimizer) def _get_losses(self, opt): loss_states = ['loss', 'hm_loss', 'dep_loss', 'dim_loss', 'rot_loss', 'wh_loss', 'off_loss'] loss = DddLoss(opt) return loss_states, loss def debug(self, batch, output, iter_id): opt = self.opt wh = output['wh'] if opt.reg_bbox else None reg = output['reg'] if opt.reg_offset else None dets = ddd_decode(output['hm'], output['rot'], output['dep'], output['dim'], wh=wh, reg=reg, K=opt.K) # x, y, score, r1-r8, depth, dim1-dim3, cls dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2]) calib = batch['meta']['calib'].detach().numpy() # x, y, score, rot, depth, dim1, dim2, dim3 # if opt.dataset == 'gta': # dets[:, 12:15] /= 3 dets_pred = ddd_post_process( dets.copy(), batch['meta']['c'].detach().numpy(), batch['meta']['s'].detach().numpy(), calib, opt) dets_gt = ddd_post_process( batch['meta']['gt_det'].detach().numpy().copy(), batch['meta']['c'].detach().numpy(), batch['meta']['s'].detach().numpy(), calib, opt) #for i in range(input.size(0)): for i in range(1): debugger = Debugger(dataset=opt.dataset, ipynb=(opt.debug==3), theme=opt.debugger_theme) img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0) img = ((img 
* self.opt.std + self.opt.mean) * 255.).astype(np.uint8) pred = debugger.gen_colormap( output['hm'][i].detach().cpu().numpy()) gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy()) debugger.add_blend_img(img, pred, 'hm_pred') debugger.add_blend_img(img, gt, 'hm_gt') # decode debugger.add_ct_detection( img, dets[i], show_box=opt.reg_bbox, center_thresh=opt.center_thresh, img_id='det_pred') debugger.add_ct_detection( img, batch['meta']['gt_det'][i].cpu().numpy().copy(), show_box=opt.reg_bbox, img_id='det_gt') debugger.add_3d_detection( batch['meta']['image_path'][i], dets_pred[i], calib[i], center_thresh=opt.center_thresh, img_id='add_pred') debugger.add_3d_detection( batch['meta']['image_path'][i], dets_gt[i], calib[i], center_thresh=opt.center_thresh, img_id='add_gt') # debugger.add_bird_view( # dets_pred[i], center_thresh=opt.center_thresh, img_id='bird_pred') # debugger.add_bird_view(dets_gt[i], img_id='bird_gt') debugger.add_bird_views( dets_pred[i], dets_gt[i], center_thresh=opt.center_thresh, img_id='bird_pred_gt') # debugger.add_blend_img(img, pred, 'out', white=True) debugger.compose_vis_add( batch['meta']['image_path'][i], dets_pred[i], calib[i], opt.center_thresh, pred, 'bird_pred_gt', img_id='out') # debugger.add_img(img, img_id='out') if opt.debug ==4: debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id)) else: debugger.show_all_imgs(pause=True) def save_result(self, output, batch, results): opt = self.opt wh = output['wh'] if opt.reg_bbox else None reg = output['reg'] if opt.reg_offset else None dets = ddd_decode(output['hm'], output['rot'], output['dep'], output['dim'], wh=wh, reg=reg, K=opt.K) # x, y, score, r1-r8, depth, dim1-dim3, cls dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2]) calib = batch['meta']['calib'].detach().numpy() # x, y, score, rot, depth, dim1, dim2, dim3 dets_pred = ddd_post_process( dets.copy(), batch['meta']['c'].detach().numpy(), batch['meta']['s'].detach().numpy(), calib, opt) img_id = batch['meta']['img_id'].detach().numpy()[0] results[img_id] = dets_pred[0] for j in range(1, opt.num_classes + 1): keep_inds = (results[img_id][j][:, -1] > opt.center_thresh) results[img_id][j] = results[img_id][j][keep_inds]
6,919
43.645161
80
py
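A small numeric check (not from the repository) of the depth parameterisation used in DddLoss above, output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.: large negative logits map to large positive depths, a zero logit maps to a depth of 1, and large positive logits map to depths near zero.

import torch

logits = torch.tensor([-4.0, 0.0, 4.0])
depth = 1. / (logits.sigmoid() + 1e-6) - 1.
print(depth)   # roughly [54.6, 1.0, 0.02]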
SyNet
SyNet-master/CenterNet/src/lib/trains/multi_pose.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch import numpy as np from models.losses import FocalLoss, RegL1Loss, RegLoss, RegWeightedL1Loss from models.decode import multi_pose_decode from models.utils import _sigmoid, flip_tensor, flip_lr_off, flip_lr from utils.debugger import Debugger from utils.post_process import multi_pose_post_process from utils.oracle_utils import gen_oracle_map from .base_trainer import BaseTrainer class MultiPoseLoss(torch.nn.Module): def __init__(self, opt): super(MultiPoseLoss, self).__init__() self.crit = FocalLoss() self.crit_hm_hp = torch.nn.MSELoss() if opt.mse_loss else FocalLoss() self.crit_kp = RegWeightedL1Loss() if not opt.dense_hp else \ torch.nn.L1Loss(reduction='sum') self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \ RegLoss() if opt.reg_loss == 'sl1' else None self.opt = opt def forward(self, outputs, batch): opt = self.opt hm_loss, wh_loss, off_loss = 0, 0, 0 hp_loss, off_loss, hm_hp_loss, hp_offset_loss = 0, 0, 0, 0 for s in range(opt.num_stacks): output = outputs[s] output['hm'] = _sigmoid(output['hm']) if opt.hm_hp and not opt.mse_loss: output['hm_hp'] = _sigmoid(output['hm_hp']) if opt.eval_oracle_hmhp: output['hm_hp'] = batch['hm_hp'] if opt.eval_oracle_hm: output['hm'] = batch['hm'] if opt.eval_oracle_kps: if opt.dense_hp: output['hps'] = batch['dense_hps'] else: output['hps'] = torch.from_numpy(gen_oracle_map( batch['hps'].detach().cpu().numpy(), batch['ind'].detach().cpu().numpy(), opt.output_res, opt.output_res)).to(opt.device) if opt.eval_oracle_hp_offset: output['hp_offset'] = torch.from_numpy(gen_oracle_map( batch['hp_offset'].detach().cpu().numpy(), batch['hp_ind'].detach().cpu().numpy(), opt.output_res, opt.output_res)).to(opt.device) hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks if opt.dense_hp: mask_weight = batch['dense_hps_mask'].sum() + 1e-4 hp_loss += (self.crit_kp(output['hps'] * batch['dense_hps_mask'], batch['dense_hps'] * batch['dense_hps_mask']) / mask_weight) / opt.num_stacks else: hp_loss += self.crit_kp(output['hps'], batch['hps_mask'], batch['ind'], batch['hps']) / opt.num_stacks if opt.wh_weight > 0: wh_loss += self.crit_reg(output['wh'], batch['reg_mask'], batch['ind'], batch['wh']) / opt.num_stacks if opt.reg_offset and opt.off_weight > 0: off_loss += self.crit_reg(output['reg'], batch['reg_mask'], batch['ind'], batch['reg']) / opt.num_stacks if opt.reg_hp_offset and opt.off_weight > 0: hp_offset_loss += self.crit_reg( output['hp_offset'], batch['hp_mask'], batch['hp_ind'], batch['hp_offset']) / opt.num_stacks if opt.hm_hp and opt.hm_hp_weight > 0: hm_hp_loss += self.crit_hm_hp( output['hm_hp'], batch['hm_hp']) / opt.num_stacks loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \ opt.off_weight * off_loss + opt.hp_weight * hp_loss + \ opt.hm_hp_weight * hm_hp_loss + opt.off_weight * hp_offset_loss loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'hp_loss': hp_loss, 'hm_hp_loss': hm_hp_loss, 'hp_offset_loss': hp_offset_loss, 'wh_loss': wh_loss, 'off_loss': off_loss} return loss, loss_stats class MultiPoseTrainer(BaseTrainer): def __init__(self, opt, model, optimizer=None): super(MultiPoseTrainer, self).__init__(opt, model, optimizer=optimizer) def _get_losses(self, opt): loss_states = ['loss', 'hm_loss', 'hp_loss', 'hm_hp_loss', 'hp_offset_loss', 'wh_loss', 'off_loss'] loss = MultiPoseLoss(opt) return loss_states, loss def debug(self, batch, output, iter_id): opt = self.opt reg = output['reg'] if 
opt.reg_offset else None hm_hp = output['hm_hp'] if opt.hm_hp else None hp_offset = output['hp_offset'] if opt.reg_hp_offset else None dets = multi_pose_decode( output['hm'], output['wh'], output['hps'], reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=opt.K) dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2]) dets[:, :, :4] *= opt.input_res / opt.output_res dets[:, :, 5:39] *= opt.input_res / opt.output_res dets_gt = batch['meta']['gt_det'].numpy().reshape(1, -1, dets.shape[2]) dets_gt[:, :, :4] *= opt.input_res / opt.output_res dets_gt[:, :, 5:39] *= opt.input_res / opt.output_res for i in range(1): debugger = Debugger( dataset=opt.dataset, ipynb=(opt.debug==3), theme=opt.debugger_theme) img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0) img = np.clip((( img * opt.std + opt.mean) * 255.), 0, 255).astype(np.uint8) pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy()) gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy()) debugger.add_blend_img(img, pred, 'pred_hm') debugger.add_blend_img(img, gt, 'gt_hm') debugger.add_img(img, img_id='out_pred') for k in range(len(dets[i])): if dets[i, k, 4] > opt.center_thresh: debugger.add_coco_bbox(dets[i, k, :4], dets[i, k, -1], dets[i, k, 4], img_id='out_pred') debugger.add_coco_hp(dets[i, k, 5:39], img_id='out_pred') debugger.add_img(img, img_id='out_gt') for k in range(len(dets_gt[i])): if dets_gt[i, k, 4] > opt.center_thresh: debugger.add_coco_bbox(dets_gt[i, k, :4], dets_gt[i, k, -1], dets_gt[i, k, 4], img_id='out_gt') debugger.add_coco_hp(dets_gt[i, k, 5:39], img_id='out_gt') if opt.hm_hp: pred = debugger.gen_colormap_hp(output['hm_hp'][i].detach().cpu().numpy()) gt = debugger.gen_colormap_hp(batch['hm_hp'][i].detach().cpu().numpy()) debugger.add_blend_img(img, pred, 'pred_hmhp') debugger.add_blend_img(img, gt, 'gt_hmhp') if opt.debug == 4: debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id)) else: debugger.show_all_imgs(pause=True) def save_result(self, output, batch, results): reg = output['reg'] if self.opt.reg_offset else None hm_hp = output['hm_hp'] if self.opt.hm_hp else None hp_offset = output['hp_offset'] if self.opt.reg_hp_offset else None dets = multi_pose_decode( output['hm'], output['wh'], output['hps'], reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=self.opt.K) dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2]) dets_out = multi_pose_post_process( dets.copy(), batch['meta']['c'].cpu().numpy(), batch['meta']['s'].cpu().numpy(), output['hm'].shape[2], output['hm'].shape[3]) results[batch['meta']['img_id'].cpu().numpy()[0]] = dets_out[0]
7,252
44.049689
82
py
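In MultiPoseTrainer above, the slice dets[:, :, 5:39] covers 34 values per detection, i.e. (x, y) for each of the 17 COCO keypoints, which is why it is rescaled together with the box coordinates. An illustrative decode with dummy tensors (import path assumed):

import torch
from models.decode import multi_pose_decode  # assumed import path

hm = torch.rand(1, 1, 128, 128)     # single 'person' heatmap
wh = torch.rand(1, 2, 128, 128)
hps = torch.rand(1, 34, 128, 128)   # 17 keypoints x (dx, dy) offsets from the object centre
dets = multi_pose_decode(hm, wh, hps, K=100)
print(dets.shape)                   # torch.Size([1, 100, 40]): box(4) + score(1) + keypoints(34) + class(1)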
SyNet
SyNet-master/CenterNet/src/lib/trains/base_trainer.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import torch from progress.bar import Bar from models.data_parallel import DataParallel from utils.utils import AverageMeter class ModelWithLoss(torch.nn.Module): def __init__(self, model, loss): super(ModelWithLoss, self).__init__() self.model = model self.loss = loss def forward(self, batch): outputs = self.model(batch['input']) loss, loss_stats = self.loss(outputs, batch) return outputs[-1], loss, loss_stats class BaseTrainer(object): def __init__( self, opt, model, optimizer=None): self.opt = opt self.optimizer = optimizer self.loss_stats, self.loss = self._get_losses(opt) self.model_with_loss = ModelWithLoss(model, self.loss) def set_device(self, gpus, chunk_sizes, device): if len(gpus) > 1: self.model_with_loss = DataParallel( self.model_with_loss, device_ids=gpus, chunk_sizes=chunk_sizes).to(device) else: self.model_with_loss = self.model_with_loss.to(device) for state in self.optimizer.state.values(): for k, v in state.items(): if isinstance(v, torch.Tensor): state[k] = v.to(device=device, non_blocking=True) def run_epoch(self, phase, epoch, data_loader): model_with_loss = self.model_with_loss if phase == 'train': model_with_loss.train() else: if len(self.opt.gpus) > 1: model_with_loss = self.model_with_loss.module model_with_loss.eval() torch.cuda.empty_cache() opt = self.opt results = {} data_time, batch_time = AverageMeter(), AverageMeter() avg_loss_stats = {l: AverageMeter() for l in self.loss_stats} num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters bar = Bar('{}/{}'.format(opt.task, opt.exp_id), max=num_iters) end = time.time() for iter_id, batch in enumerate(data_loader): if iter_id >= num_iters: break data_time.update(time.time() - end) for k in batch: if k != 'meta': batch[k] = batch[k].to(device=opt.device, non_blocking=True) output, loss, loss_stats = model_with_loss(batch) loss = loss.mean() if phase == 'train': self.optimizer.zero_grad() loss.backward() self.optimizer.step() batch_time.update(time.time() - end) end = time.time() Bar.suffix = '{phase}: [{0}][{1}/{2}]|Tot: {total:} |ETA: {eta:} '.format( epoch, iter_id, num_iters, phase=phase, total=bar.elapsed_td, eta=bar.eta_td) for l in avg_loss_stats: avg_loss_stats[l].update( loss_stats[l].mean().item(), batch['input'].size(0)) Bar.suffix = Bar.suffix + '|{} {:.4f} '.format(l, avg_loss_stats[l].avg) if not opt.hide_data_time: Bar.suffix = Bar.suffix + '|Data {dt.val:.3f}s({dt.avg:.3f}s) ' \ '|Net {bt.avg:.3f}s'.format(dt=data_time, bt=batch_time) if opt.print_iter > 0: if iter_id % opt.print_iter == 0: print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix)) else: bar.next() if opt.debug > 0: self.debug(batch, output, iter_id) if opt.test: self.save_result(output, batch, results) del output, loss, loss_stats bar.finish() ret = {k: v.avg for k, v in avg_loss_stats.items()} ret['time'] = bar.elapsed_td.total_seconds() / 60. return ret, results def debug(self, batch, output, iter_id): raise NotImplementedError def save_result(self, output, batch, results): raise NotImplementedError def _get_losses(self, opt): raise NotImplementedError def val(self, epoch, data_loader): return self.run_epoch('val', epoch, data_loader) def train(self, epoch, data_loader): return self.run_epoch('train', epoch, data_loader)
3,913
31.890756
80
py
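A minimal sketch of how the BaseTrainer above is typically specialised and driven; `opt`, `model`, `train_loader`, `val_loader`, and `CtdetLoss` are illustrative assumptions, not part of the record:

import torch

class CtdetTrainer(BaseTrainer):
  def _get_losses(self, opt):
    loss_stats = ['loss', 'hm_loss', 'wh_loss']          # keys must match what the loss module returns
    return loss_stats, CtdetLoss(opt)                    # any nn.Module returning (loss, {name: tensor})

  def debug(self, batch, output, iter_id):
    pass                                                 # optional visualisation hook (used when opt.debug > 0)

  def save_result(self, output, batch, results):
    pass                                                 # optional test-time collection (used when opt.test)

optimizer = torch.optim.Adam(model.parameters(), lr=1.25e-4)
trainer = CtdetTrainer(opt, model, optimizer)
trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
log_dict_train, _ = trainer.train(epoch, train_loader)   # averaged loss stats plus elapsed 'time'
log_dict_val, _ = trainer.val(epoch, val_loader)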
SyNet
SyNet-master/CenterNet/src/lib/datasets/dataset_factory.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function from .sample.ddd import DddDataset from .sample.exdet import EXDetDataset from .sample.ctdet import CTDetDataset from .sample.multi_pose import MultiPoseDataset from .dataset.visdrone import Visdrone from .dataset.fashion import Fashion from .dataset.coco import COCO from .dataset.pascal import PascalVOC from .dataset.kitti import KITTI from .dataset.coco_hp import COCOHP dataset_factory = { 'visdrone': Visdrone, 'fashion': Fashion, 'coco': COCO, 'pascal': PascalVOC, 'kitti': KITTI, 'coco_hp': COCOHP } _sample_factory = { 'exdet': EXDetDataset, 'ctdet': CTDetDataset, 'ddd': DddDataset, 'multi_pose': MultiPoseDataset } def get_dataset(dataset, task): class Dataset(dataset_factory[dataset], _sample_factory[task]): pass return Dataset
885
22.315789
65
py
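A usage sketch for the factory above; `opt` is assumed to carry the attributes the chosen dataset and sample classes read (data_dir, task, input/output resolution, augmentation flags):

from datasets.dataset_factory import get_dataset

Dataset = get_dataset('coco', 'ctdet')   # mixes COCO (paths/annotations) with CTDetDataset (__getitem__)
train_dataset = Dataset(opt, 'train')    # each item is the dict assembled in CTDetDataset.__getitem__
val_dataset = Dataset(opt, 'val')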
SyNet
SyNet-master/CenterNet/src/lib/datasets/sample/exdet.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch.utils.data as data import pycocotools.coco as coco import numpy as np import torch import json import cv2 import os from utils.image import flip, color_aug from utils.image import get_affine_transform, affine_transform from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian import pycocotools.coco as coco import math class EXDetDataset(data.Dataset): def _coco_box_to_bbox(self, box): bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32) return bbox def _get_border(self, border, size): i = 1 while size - border // i <= border // i: i *= 2 return border // i def __getitem__(self, index): img_id = self.images[index] img_info = self.coco.loadImgs(ids=[img_id])[0] img_path = os.path.join(self.img_dir, img_info['file_name']) img = cv2.imread(img_path) height, width = img.shape[0], img.shape[1] c = np.array([img.shape[1] / 2., img.shape[0] / 2.]) s = max(img.shape[0], img.shape[1]) * 1.0 flipped = False if self.split == 'train': if not self.opt.not_rand_crop: s = s * np.random.choice(np.arange(0.6, 1.4, 0.1)) w_border = self._get_border(128, img.shape[1]) h_border = self._get_border(128, img.shape[0]) c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border) c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border) else: sf = self.opt.scale cf = self.opt.shift s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf) c[0] += img.shape[1] * np.clip(np.random.randn()*cf, -2*cf, 2*cf) c[1] += img.shape[0] * np.clip(np.random.randn()*cf, -2*cf, 2*cf) if np.random.random() < self.opt.flip: flipped = True img = img[:, ::-1, :] trans_input = get_affine_transform( c, s, 0, [self.opt.input_res, self.opt.input_res]) inp = cv2.warpAffine(img, trans_input, (self.opt.input_res, self.opt.input_res), flags=cv2.INTER_LINEAR) inp = (inp.astype(np.float32) / 255.) 
if self.split == 'train' and not self.opt.no_color_aug: color_aug(self._data_rng, inp, self._eig_val, self._eig_vec) inp = (inp - self.mean) / self.std inp = inp.transpose(2, 0, 1) output_res = self.opt.output_res num_classes = self.opt.num_classes trans_output = get_affine_transform(c, s, 0, [output_res, output_res]) num_hm = 1 if self.opt.agnostic_ex else num_classes hm_t = np.zeros((num_hm, output_res, output_res), dtype=np.float32) hm_l = np.zeros((num_hm, output_res, output_res), dtype=np.float32) hm_b = np.zeros((num_hm, output_res, output_res), dtype=np.float32) hm_r = np.zeros((num_hm, output_res, output_res), dtype=np.float32) hm_c = np.zeros((num_classes, output_res, output_res), dtype=np.float32) reg_t = np.zeros((self.max_objs, 2), dtype=np.float32) reg_l = np.zeros((self.max_objs, 2), dtype=np.float32) reg_b = np.zeros((self.max_objs, 2), dtype=np.float32) reg_r = np.zeros((self.max_objs, 2), dtype=np.float32) ind_t = np.zeros((self.max_objs), dtype=np.int64) ind_l = np.zeros((self.max_objs), dtype=np.int64) ind_b = np.zeros((self.max_objs), dtype=np.int64) ind_r = np.zeros((self.max_objs), dtype=np.int64) reg_mask = np.zeros((self.max_objs), dtype=np.uint8) ann_ids = self.coco.getAnnIds(imgIds=[img_id]) anns = self.coco.loadAnns(ids=ann_ids) num_objs = min(len(anns), self.max_objs) draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \ draw_umich_gaussian for k in range(num_objs): ann = anns[k] # bbox = self._coco_box_to_bbox(ann['bbox']) # tlbr pts = np.array(ann['extreme_points'], dtype=np.float32).reshape(4, 2) # cls_id = int(self.cat_ids[ann['category_id']] - 1) # bug cls_id = int(self.cat_ids[ann['category_id']]) hm_id = 0 if self.opt.agnostic_ex else cls_id if flipped: pts[:, 0] = width - pts[:, 0] - 1 pts[1], pts[3] = pts[3].copy(), pts[1].copy() for j in range(4): pts[j] = affine_transform(pts[j], trans_output) pts = np.clip(pts, 0, self.opt.output_res - 1) h, w = pts[2, 1] - pts[0, 1], pts[3, 0] - pts[1, 0] if h > 0 and w > 0: radius = gaussian_radius((math.ceil(h), math.ceil(w))) radius = max(0, int(radius)) pt_int = pts.astype(np.int32) draw_gaussian(hm_t[hm_id], pt_int[0], radius) draw_gaussian(hm_l[hm_id], pt_int[1], radius) draw_gaussian(hm_b[hm_id], pt_int[2], radius) draw_gaussian(hm_r[hm_id], pt_int[3], radius) reg_t[k] = pts[0] - pt_int[0] reg_l[k] = pts[1] - pt_int[1] reg_b[k] = pts[2] - pt_int[2] reg_r[k] = pts[3] - pt_int[3] ind_t[k] = pt_int[0, 1] * output_res + pt_int[0, 0] ind_l[k] = pt_int[1, 1] * output_res + pt_int[1, 0] ind_b[k] = pt_int[2, 1] * output_res + pt_int[2, 0] ind_r[k] = pt_int[3, 1] * output_res + pt_int[3, 0] ct = [int((pts[3, 0] + pts[1, 0]) / 2), int((pts[0, 1] + pts[2, 1]) / 2)] draw_gaussian(hm_c[cls_id], ct, radius) reg_mask[k] = 1 ret = {'input': inp, 'hm_t': hm_t, 'hm_l': hm_l, 'hm_b': hm_b, 'hm_r': hm_r, 'hm_c': hm_c} if self.opt.reg_offset: ret.update({'reg_mask': reg_mask, 'reg_t': reg_t, 'reg_l': reg_l, 'reg_b': reg_b, 'reg_r': reg_r, 'ind_t': ind_t, 'ind_l': ind_l, 'ind_b': ind_b, 'ind_r': ind_r}) return ret
5,722
40.773723
81
py
SyNet
SyNet-master/CenterNet/src/lib/datasets/sample/ctdet.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch.utils.data as data import numpy as np import torch import json import cv2 import os from utils.image import flip, color_aug from utils.image import get_affine_transform, affine_transform from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian from utils.image import draw_dense_reg import math class CTDetDataset(data.Dataset): def _coco_box_to_bbox(self, box): bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32) return bbox def _get_border(self, border, size): i = 1 while size - border // i <= border // i: i *= 2 return border // i def __getitem__(self, index): img_id = self.images[index] file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name'] img_path = os.path.join(self.img_dir, file_name) ann_ids = self.coco.getAnnIds(imgIds=[img_id]) anns = self.coco.loadAnns(ids=ann_ids) num_objs = min(len(anns), self.max_objs) img = cv2.imread(img_path) height, width = img.shape[0], img.shape[1] c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32) if self.opt.keep_res: input_h = (height | self.opt.pad) + 1 input_w = (width | self.opt.pad) + 1 s = np.array([input_w, input_h], dtype=np.float32) else: s = max(img.shape[0], img.shape[1]) * 1.0 input_h, input_w = self.opt.input_h, self.opt.input_w flipped = False if self.split == 'train': if not self.opt.not_rand_crop: s = s * np.random.choice(np.arange(0.6, 1.4, 0.1)) w_border = self._get_border(128, img.shape[1]) h_border = self._get_border(128, img.shape[0]) c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border) c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border) else: sf = self.opt.scale cf = self.opt.shift c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf) c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf) s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf) if np.random.random() < self.opt.flip: flipped = True img = img[:, ::-1, :] c[0] = width - c[0] - 1 trans_input = get_affine_transform( c, s, 0, [input_w, input_h]) inp = cv2.warpAffine(img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR) inp = (inp.astype(np.float32) / 255.) 
if self.split == 'train' and not self.opt.no_color_aug: color_aug(self._data_rng, inp, self._eig_val, self._eig_vec) inp = (inp - self.mean) / self.std inp = inp.transpose(2, 0, 1) output_h = input_h // self.opt.down_ratio output_w = input_w // self.opt.down_ratio num_classes = self.num_classes trans_output = get_affine_transform(c, s, 0, [output_w, output_h]) hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32) wh = np.zeros((self.max_objs, 2), dtype=np.float32) dense_wh = np.zeros((2, output_h, output_w), dtype=np.float32) reg = np.zeros((self.max_objs, 2), dtype=np.float32) ind = np.zeros((self.max_objs), dtype=np.int64) reg_mask = np.zeros((self.max_objs), dtype=np.uint8) cat_spec_wh = np.zeros((self.max_objs, num_classes * 2), dtype=np.float32) cat_spec_mask = np.zeros((self.max_objs, num_classes * 2), dtype=np.uint8) draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \ draw_umich_gaussian gt_det = [] for k in range(num_objs): ann = anns[k] bbox = self._coco_box_to_bbox(ann['bbox']) cls_id = int(self.cat_ids[ann['category_id']]) if flipped: bbox[[0, 2]] = width - bbox[[2, 0]] - 1 bbox[:2] = affine_transform(bbox[:2], trans_output) bbox[2:] = affine_transform(bbox[2:], trans_output) bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1) bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1) h, w = bbox[3] - bbox[1], bbox[2] - bbox[0] if h > 0 and w > 0: radius = gaussian_radius((math.ceil(h), math.ceil(w))) radius = max(0, int(radius)) radius = self.opt.hm_gauss if self.opt.mse_loss else radius ct = np.array( [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32) ct_int = ct.astype(np.int32) draw_gaussian(hm[cls_id], ct_int, radius) wh[k] = 1. * w, 1. * h ind[k] = ct_int[1] * output_w + ct_int[0] reg[k] = ct - ct_int reg_mask[k] = 1 cat_spec_wh[k, cls_id * 2: cls_id * 2 + 2] = wh[k] cat_spec_mask[k, cls_id * 2: cls_id * 2 + 2] = 1 if self.opt.dense_wh: draw_dense_reg(dense_wh, hm.max(axis=0), ct_int, wh[k], radius) gt_det.append([ct[0] - w / 2, ct[1] - h / 2, ct[0] + w / 2, ct[1] + h / 2, 1, cls_id]) ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh} if self.opt.dense_wh: hm_a = hm.max(axis=0, keepdims=True) dense_wh_mask = np.concatenate([hm_a, hm_a], axis=0) ret.update({'dense_wh': dense_wh, 'dense_wh_mask': dense_wh_mask}) del ret['wh'] elif self.opt.cat_spec_wh: ret.update({'cat_spec_wh': cat_spec_wh, 'cat_spec_mask': cat_spec_mask}) del ret['wh'] if self.opt.reg_offset: ret.update({'reg': reg}) if self.opt.debug > 0 or not self.split == 'train': gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \ np.zeros((1, 6), dtype=np.float32) meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_id} ret['meta'] = meta return ret
5,803
39.027586
80
py
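A sketch of consuming the CTDetDataset samples above with a PyTorch DataLoader; batch keys follow the `ret` dict ('input', 'hm', 'reg_mask', 'ind', 'wh', plus 'reg' when opt.reg_offset is set), and `Dataset`/`opt` are assumed as in the factory sketch above:

import torch

train_loader = torch.utils.data.DataLoader(
    Dataset(opt, 'train'), batch_size=32, shuffle=True,
    num_workers=4, pin_memory=True, drop_last=True)
for batch in train_loader:
  inp = batch['input']   # (B, 3, input_h, input_w), float, normalised by mean/std
  hm = batch['hm']       # (B, num_classes, output_h, output_w) centre heatmaps in [0, 1]
  break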
SyNet
SyNet-master/CenterNet/src/lib/datasets/sample/ddd.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch.utils.data as data import pycocotools.coco as coco import numpy as np import torch import json import cv2 import os import math from utils.image import flip, color_aug from utils.image import get_affine_transform, affine_transform from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian import pycocotools.coco as coco class DddDataset(data.Dataset): def _coco_box_to_bbox(self, box): bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32) return bbox def _convert_alpha(self, alpha): return math.radians(alpha + 45) if self.alpha_in_degree else alpha def __getitem__(self, index): img_id = self.images[index] img_info = self.coco.loadImgs(ids=[img_id])[0] img_path = os.path.join(self.img_dir, img_info['file_name']) img = cv2.imread(img_path) if 'calib' in img_info: calib = np.array(img_info['calib'], dtype=np.float32) else: calib = self.calib height, width = img.shape[0], img.shape[1] c = np.array([img.shape[1] / 2., img.shape[0] / 2.]) if self.opt.keep_res: s = np.array([self.opt.input_w, self.opt.input_h], dtype=np.int32) else: s = np.array([width, height], dtype=np.int32) aug = False if self.split == 'train' and np.random.random() < self.opt.aug_ddd: aug = True sf = self.opt.scale cf = self.opt.shift s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf) c[0] += img.shape[1] * np.clip(np.random.randn()*cf, -2*cf, 2*cf) c[1] += img.shape[0] * np.clip(np.random.randn()*cf, -2*cf, 2*cf) trans_input = get_affine_transform( c, s, 0, [self.opt.input_w, self.opt.input_h]) inp = cv2.warpAffine(img, trans_input, (self.opt.input_w, self.opt.input_h), flags=cv2.INTER_LINEAR) inp = (inp.astype(np.float32) / 255.) 
# if self.split == 'train' and not self.opt.no_color_aug: # color_aug(self._data_rng, inp, self._eig_val, self._eig_vec) inp = (inp - self.mean) / self.std inp = inp.transpose(2, 0, 1) num_classes = self.opt.num_classes trans_output = get_affine_transform( c, s, 0, [self.opt.output_w, self.opt.output_h]) hm = np.zeros( (num_classes, self.opt.output_h, self.opt.output_w), dtype=np.float32) wh = np.zeros((self.max_objs, 2), dtype=np.float32) reg = np.zeros((self.max_objs, 2), dtype=np.float32) dep = np.zeros((self.max_objs, 1), dtype=np.float32) rotbin = np.zeros((self.max_objs, 2), dtype=np.int64) rotres = np.zeros((self.max_objs, 2), dtype=np.float32) dim = np.zeros((self.max_objs, 3), dtype=np.float32) ind = np.zeros((self.max_objs), dtype=np.int64) reg_mask = np.zeros((self.max_objs), dtype=np.uint8) rot_mask = np.zeros((self.max_objs), dtype=np.uint8) ann_ids = self.coco.getAnnIds(imgIds=[img_id]) anns = self.coco.loadAnns(ids=ann_ids) num_objs = min(len(anns), self.max_objs) draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \ draw_umich_gaussian gt_det = [] for k in range(num_objs): ann = anns[k] bbox = self._coco_box_to_bbox(ann['bbox']) cls_id = int(self.cat_ids[ann['category_id']]) if cls_id <= -99: continue # if flipped: # bbox[[0, 2]] = width - bbox[[2, 0]] - 1 bbox[:2] = affine_transform(bbox[:2], trans_output) bbox[2:] = affine_transform(bbox[2:], trans_output) bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.opt.output_w - 1) bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.opt.output_h - 1) h, w = bbox[3] - bbox[1], bbox[2] - bbox[0] if h > 0 and w > 0: radius = gaussian_radius((h, w)) radius = max(0, int(radius)) ct = np.array( [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32) ct_int = ct.astype(np.int32) if cls_id < 0: ignore_id = [_ for _ in range(num_classes)] \ if cls_id == - 1 else [- cls_id - 2] if self.opt.rect_mask: hm[ignore_id, int(bbox[1]): int(bbox[3]) + 1, int(bbox[0]): int(bbox[2]) + 1] = 0.9999 else: for cc in ignore_id: draw_gaussian(hm[cc], ct, radius) hm[ignore_id, ct_int[1], ct_int[0]] = 0.9999 continue draw_gaussian(hm[cls_id], ct, radius) wh[k] = 1. * w, 1. * h gt_det.append([ct[0], ct[1], 1] + \ self._alpha_to_8(self._convert_alpha(ann['alpha'])) + \ [ann['depth']] + (np.array(ann['dim']) / 1).tolist() + [cls_id]) if self.opt.reg_bbox: gt_det[-1] = gt_det[-1][:-1] + [w, h] + [gt_det[-1][-1]] # if (not self.opt.car_only) or cls_id == 1: # Only estimate ADD for cars !!! if 1: alpha = self._convert_alpha(ann['alpha']) # print('img_id cls_id alpha rot_y', img_path, cls_id, alpha, ann['rotation_y']) if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.: rotbin[k, 0] = 1 rotres[k, 0] = alpha - (-0.5 * np.pi) if alpha > -np.pi / 6. 
or alpha < -5 * np.pi / 6.: rotbin[k, 1] = 1 rotres[k, 1] = alpha - (0.5 * np.pi) dep[k] = ann['depth'] dim[k] = ann['dim'] # print(' cat dim', cls_id, dim[k]) ind[k] = ct_int[1] * self.opt.output_w + ct_int[0] reg[k] = ct - ct_int reg_mask[k] = 1 if not aug else 0 rot_mask[k] = 1 # print('gt_det', gt_det) # print('') ret = {'input': inp, 'hm': hm, 'dep': dep, 'dim': dim, 'ind': ind, 'rotbin': rotbin, 'rotres': rotres, 'reg_mask': reg_mask, 'rot_mask': rot_mask} if self.opt.reg_bbox: ret.update({'wh': wh}) if self.opt.reg_offset: ret.update({'reg': reg}) if self.opt.debug > 0 or not ('train' in self.split): gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \ np.zeros((1, 18), dtype=np.float32) meta = {'c': c, 's': s, 'gt_det': gt_det, 'calib': calib, 'image_path': img_path, 'img_id': img_id} ret['meta'] = meta return ret def _alpha_to_8(self, alpha): # return [alpha, 0, 0, 0, 0, 0, 0, 0] ret = [0, 0, 0, 1, 0, 0, 0, 1] if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.: r = alpha - (-0.5 * np.pi) ret[1] = 1 ret[2], ret[3] = np.sin(r), np.cos(r) if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.: r = alpha - (0.5 * np.pi) ret[5] = 1 ret[6], ret[7] = np.sin(r), np.cos(r) return ret
6,801
38.777778
90
py
SyNet
SyNet-master/CenterNet/src/lib/datasets/sample/multi_pose.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch.utils.data as data import numpy as np import torch import json import cv2 import os from utils.image import flip, color_aug from utils.image import get_affine_transform, affine_transform from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian from utils.image import draw_dense_reg import math class MultiPoseDataset(data.Dataset): def _coco_box_to_bbox(self, box): bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32) return bbox def _get_border(self, border, size): i = 1 while size - border // i <= border // i: i *= 2 return border // i def __getitem__(self, index): img_id = self.images[index] file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name'] img_path = os.path.join(self.img_dir, file_name) ann_ids = self.coco.getAnnIds(imgIds=[img_id]) anns = self.coco.loadAnns(ids=ann_ids) num_objs = min(len(anns), self.max_objs) img = cv2.imread(img_path) height, width = img.shape[0], img.shape[1] c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32) s = max(img.shape[0], img.shape[1]) * 1.0 rot = 0 flipped = False if self.split == 'train': if not self.opt.not_rand_crop: s = s * np.random.choice(np.arange(0.6, 1.4, 0.1)) w_border = self._get_border(128, img.shape[1]) h_border = self._get_border(128, img.shape[0]) c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border) c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border) else: sf = self.opt.scale cf = self.opt.shift c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf) c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf) s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf) if np.random.random() < self.opt.aug_rot: rf = self.opt.rotate rot = np.clip(np.random.randn()*rf, -rf*2, rf*2) if np.random.random() < self.opt.flip: flipped = True img = img[:, ::-1, :] c[0] = width - c[0] - 1 trans_input = get_affine_transform( c, s, rot, [self.opt.input_res, self.opt.input_res]) inp = cv2.warpAffine(img, trans_input, (self.opt.input_res, self.opt.input_res), flags=cv2.INTER_LINEAR) inp = (inp.astype(np.float32) / 255.) 
if self.split == 'train' and not self.opt.no_color_aug: color_aug(self._data_rng, inp, self._eig_val, self._eig_vec) inp = (inp - self.mean) / self.std inp = inp.transpose(2, 0, 1) output_res = self.opt.output_res num_joints = self.num_joints trans_output_rot = get_affine_transform(c, s, rot, [output_res, output_res]) trans_output = get_affine_transform(c, s, 0, [output_res, output_res]) hm = np.zeros((self.num_classes, output_res, output_res), dtype=np.float32) hm_hp = np.zeros((num_joints, output_res, output_res), dtype=np.float32) dense_kps = np.zeros((num_joints, 2, output_res, output_res), dtype=np.float32) dense_kps_mask = np.zeros((num_joints, output_res, output_res), dtype=np.float32) wh = np.zeros((self.max_objs, 2), dtype=np.float32) kps = np.zeros((self.max_objs, num_joints * 2), dtype=np.float32) reg = np.zeros((self.max_objs, 2), dtype=np.float32) ind = np.zeros((self.max_objs), dtype=np.int64) reg_mask = np.zeros((self.max_objs), dtype=np.uint8) kps_mask = np.zeros((self.max_objs, self.num_joints * 2), dtype=np.uint8) hp_offset = np.zeros((self.max_objs * num_joints, 2), dtype=np.float32) hp_ind = np.zeros((self.max_objs * num_joints), dtype=np.int64) hp_mask = np.zeros((self.max_objs * num_joints), dtype=np.int64) draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \ draw_umich_gaussian gt_det = [] for k in range(num_objs): ann = anns[k] bbox = self._coco_box_to_bbox(ann['bbox']) cls_id = int(ann['category_id']) - 1 pts = np.array(ann['keypoints'], np.float32).reshape(num_joints, 3) if flipped: bbox[[0, 2]] = width - bbox[[2, 0]] - 1 pts[:, 0] = width - pts[:, 0] - 1 for e in self.flip_idx: pts[e[0]], pts[e[1]] = pts[e[1]].copy(), pts[e[0]].copy() bbox[:2] = affine_transform(bbox[:2], trans_output) bbox[2:] = affine_transform(bbox[2:], trans_output) bbox = np.clip(bbox, 0, output_res - 1) h, w = bbox[3] - bbox[1], bbox[2] - bbox[0] if (h > 0 and w > 0) or (rot != 0): radius = gaussian_radius((math.ceil(h), math.ceil(w))) radius = self.opt.hm_gauss if self.opt.mse_loss else max(0, int(radius)) ct = np.array( [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32) ct_int = ct.astype(np.int32) wh[k] = 1. * w, 1. 
* h ind[k] = ct_int[1] * output_res + ct_int[0] reg[k] = ct - ct_int reg_mask[k] = 1 num_kpts = pts[:, 2].sum() if num_kpts == 0: hm[cls_id, ct_int[1], ct_int[0]] = 0.9999 reg_mask[k] = 0 hp_radius = gaussian_radius((math.ceil(h), math.ceil(w))) hp_radius = self.opt.hm_gauss \ if self.opt.mse_loss else max(0, int(hp_radius)) for j in range(num_joints): if pts[j, 2] > 0: pts[j, :2] = affine_transform(pts[j, :2], trans_output_rot) if pts[j, 0] >= 0 and pts[j, 0] < output_res and \ pts[j, 1] >= 0 and pts[j, 1] < output_res: kps[k, j * 2: j * 2 + 2] = pts[j, :2] - ct_int kps_mask[k, j * 2: j * 2 + 2] = 1 pt_int = pts[j, :2].astype(np.int32) hp_offset[k * num_joints + j] = pts[j, :2] - pt_int hp_ind[k * num_joints + j] = pt_int[1] * output_res + pt_int[0] hp_mask[k * num_joints + j] = 1 if self.opt.dense_hp: # must be before draw center hm gaussian draw_dense_reg(dense_kps[j], hm[cls_id], ct_int, pts[j, :2] - ct_int, radius, is_offset=True) draw_gaussian(dense_kps_mask[j], ct_int, radius) draw_gaussian(hm_hp[j], pt_int, hp_radius) draw_gaussian(hm[cls_id], ct_int, radius) gt_det.append([ct[0] - w / 2, ct[1] - h / 2, ct[0] + w / 2, ct[1] + h / 2, 1] + pts[:, :2].reshape(num_joints * 2).tolist() + [cls_id]) if rot != 0: hm = hm * 0 + 0.9999 reg_mask *= 0 kps_mask *= 0 ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh, 'hps': kps, 'hps_mask': kps_mask} if self.opt.dense_hp: dense_kps = dense_kps.reshape(num_joints * 2, output_res, output_res) dense_kps_mask = dense_kps_mask.reshape( num_joints, 1, output_res, output_res) dense_kps_mask = np.concatenate([dense_kps_mask, dense_kps_mask], axis=1) dense_kps_mask = dense_kps_mask.reshape( num_joints * 2, output_res, output_res) ret.update({'dense_hps': dense_kps, 'dense_hps_mask': dense_kps_mask}) del ret['hps'], ret['hps_mask'] if self.opt.reg_offset: ret.update({'reg': reg}) if self.opt.hm_hp: ret.update({'hm_hp': hm_hp}) if self.opt.reg_hp_offset: ret.update({'hp_offset': hp_offset, 'hp_ind': hp_ind, 'hp_mask': hp_mask}) if self.opt.debug > 0 or not self.split == 'train': gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \ np.zeros((1, 40), dtype=np.float32) meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_id} ret['meta'] = meta return ret
7,913
42.01087
81
py
SyNet
SyNet-master/CenterNet/src/lib/datasets/dataset/kitti.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch.utils.data as data import pycocotools.coco as coco import numpy as np import torch import json import cv2 import os import math import torch.utils.data as data class KITTI(data.Dataset): num_classes = 3 default_resolution = [384, 1280] mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3) std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3) def __init__(self, opt, split): super(KITTI, self).__init__() self.data_dir = os.path.join(opt.data_dir, 'kitti') self.img_dir = os.path.join(self.data_dir, 'images', 'trainval') if opt.trainval: split = 'trainval' if split == 'train' else 'test' self.img_dir = os.path.join(self.data_dir, 'images', split) self.annot_path = os.path.join( self.data_dir, 'annotations', 'kitti_{}.json').format(split) else: self.annot_path = os.path.join(self.data_dir, 'annotations', 'kitti_{}_{}.json').format(opt.kitti_split, split) self.max_objs = 50 self.class_name = [ '__background__', 'Pedestrian', 'Car', 'Cyclist'] self.cat_ids = {1:0, 2:1, 3:2, 4:-3, 5:-3, 6:-2, 7:-99, 8:-99, 9:-1} self._data_rng = np.random.RandomState(123) self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571], dtype=np.float32) self._eig_vec = np.array([ [-0.58752847, -0.69563484, 0.41340352], [-0.5832747, 0.00994535, -0.81221408], [-0.56089297, 0.71832671, 0.41158938] ], dtype=np.float32) self.split = split self.opt = opt self.alpha_in_degree = False print('==> initializing kitti {}, {} data.'.format(opt.kitti_split, split)) self.coco = coco.COCO(self.annot_path) self.images = self.coco.getImgIds() self.num_samples = len(self.images) print('Loaded {} {} samples'.format(split, self.num_samples)) def __len__(self): return self.num_samples def _to_float(self, x): return float("{:.2f}".format(x)) def convert_eval_format(self, all_bboxes): pass def save_results(self, results, save_dir): results_dir = os.path.join(save_dir, 'results') if not os.path.exists(results_dir): os.mkdir(results_dir) for img_id in results.keys(): out_path = os.path.join(results_dir, '{:06d}.txt'.format(img_id)) f = open(out_path, 'w') for cls_ind in results[img_id]: for j in range(len(results[img_id][cls_ind])): class_name = self.class_name[cls_ind] f.write('{} 0.0 0'.format(class_name)) for i in range(len(results[img_id][cls_ind][j])): f.write(' {:.2f}'.format(results[img_id][cls_ind][j][i])) f.write('\n') f.close() def run_eval(self, results, save_dir): self.save_results(results, save_dir) os.system('./tools/kitti_eval/evaluate_object_3d_offline ' + \ '../data/kitti/training/label_val ' + \ '{}/results/'.format(save_dir))
3,058
32.988889
79
py
SyNet
SyNet-master/CenterNet/src/lib/datasets/dataset/visdrone.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import pycocotools.coco as coco from pycocotools.cocoeval import COCOeval import numpy as np import json import os import torch.utils.data as data class Visdrone(data.Dataset): num_classes = 10 default_resolution = [512, 512] mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32).reshape(1, 1, 3) std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32).reshape(1, 1, 3) def __init__(self, opt, split): super(Visdrone, self).__init__() self.data_dir = os.path.join(opt.data_dir, 'coco') self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split)) if split == 'test': self.annot_path = os.path.join( self.data_dir, 'annotations', 'image_info_test-dev2017.json').format(split) else: if opt.task == 'exdet': self.annot_path = os.path.join( self.data_dir, 'annotations', 'instances_extreme_{}2017.json').format(split) else: self.annot_path = os.path.join( self.data_dir, 'annotations', 'instances_{}2017.json').format(split) self.max_objs = 16 self.class_name = ["pedestrian", "people", "bicycle", "car", "van", "truck", "tricycle", "awning-tricycle", "bus", "motor"] self._valid_ids = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)} self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \ for v in range(1, self.num_classes + 1)] self._data_rng = np.random.RandomState(123) self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571], dtype=np.float32) self._eig_vec = np.array([ [-0.58752847, -0.69563484, 0.41340352], [-0.5832747, 0.00994535, -0.81221408], [-0.56089297, 0.71832671, 0.41158938] ], dtype=np.float32) # self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3) # self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3) self.split = split self.opt = opt print('==> initializing coco 2017 {} data.'.format(split)) self.coco = coco.COCO(self.annot_path) self.images = self.coco.getImgIds() self.num_samples = len(self.images) print('Loaded {} {} samples'.format(split, self.num_samples)) def _to_float(self, x): return float("{:.2f}".format(x)) def convert_eval_format(self, all_bboxes): # import pdb; pdb.set_trace() detections = [] for image_id in all_bboxes: for cls_ind in all_bboxes[image_id]: category_id = self._valid_ids[cls_ind - 1] for bbox in all_bboxes[image_id][cls_ind]: bbox[2] -= bbox[0] bbox[3] -= bbox[1] score = bbox[4] bbox_out = list(map(self._to_float, bbox[0:4])) detection = { "image_id": int(image_id), "category_id": int(category_id), "bbox": bbox_out, "score": float("{:.2f}".format(score)) } if len(bbox) > 5: extreme_points = list(map(self._to_float, bbox[5:13])) detection["extreme_points"] = extreme_points detections.append(detection) return detections def __len__(self): return self.num_samples def save_results(self, results, save_dir): json.dump(self.convert_eval_format(results), open('{}/results.json'.format(save_dir), 'w')) def run_eval(self, results, save_dir): # result_json = os.path.join(save_dir, "results.json") # detections = self.convert_eval_format(results) # json.dump(detections, open(result_json, "w")) self.save_results(results, save_dir) coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir)) coco_eval = COCOeval(self.coco, coco_dets, "bbox") coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize()
4,040
35.405405
127
py
SyNet
SyNet-master/CenterNet/src/lib/datasets/dataset/coco_hp.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os

import torch.utils.data as data

class COCOHP(data.Dataset):
  num_classes = 13
  default_resolution = [512, 512]
  mean = np.array([0.40789654, 0.44719302, 0.47026115],
                  dtype=np.float32).reshape(1, 1, 3)
  std = np.array([0.28863828, 0.27408164, 0.27809835],
                 dtype=np.float32).reshape(1, 1, 3)

  def __init__(self, opt, split):
    super(COCOHP, self).__init__()
    self.data_dir = os.path.join(opt.data_dir, 'coco')
    self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
    if split == 'test':
      self.annot_path = os.path.join(
          self.data_dir, 'annotations',
          'image_info_test-dev2017.json').format(split)
    else:
      if opt.task == 'exdet':
        self.annot_path = os.path.join(
          self.data_dir, 'annotations',
          'instances_extreme_{}2017.json').format(split)
      else:
        self.annot_path = os.path.join(
          self.data_dir, 'annotations',
          'instances_{}2017.json').format(split)
    self.max_objs = 128
    self.class_name = ['short sleeve top', 'long sleeve top', 'short sleeve outwear',
                       'long sleeve outwear', 'vest', 'sling', 'shorts', 'trousers',
                       'skirt', 'short sleeve dress', 'long sleeve dress',
                       'vest dress', 'sling dress']
    self._valid_ids = [
      1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
      11, 13]
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
    self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \
                      for v in range(1, self.num_classes + 1)]
    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([
        [-0.58752847, -0.69563484, 0.41340352],
        [-0.5832747, 0.00994535, -0.81221408],
        [-0.56089297, 0.71832671, 0.41158938]
    ], dtype=np.float32)
    # self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
    # self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)

    self.split = split
    self.opt = opt

    print('==> initializing coco 2017 {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)

    print('Loaded {} {} samples'.format(split, self.num_samples))

  def _to_float(self, x):
    return float("{:.2f}".format(x))

  def convert_eval_format(self, all_bboxes):
    # import pdb; pdb.set_trace()
    detections = []
    for image_id in all_bboxes:
      for cls_ind in all_bboxes[image_id]:
        category_id = self._valid_ids[cls_ind - 1]
        for bbox in all_bboxes[image_id][cls_ind]:
          bbox[2] -= bbox[0]
          bbox[3] -= bbox[1]
          score = bbox[4]
          bbox_out = list(map(self._to_float, bbox[0:4]))

          detection = {
              "image_id": int(image_id),
              "category_id": int(category_id),
              "bbox": bbox_out,
              "score": float("{:.2f}".format(score))
          }
          if len(bbox) > 5:
              extreme_points = list(map(self._to_float, bbox[5:13]))
              detection["extreme_points"] = extreme_points
          detections.append(detection)
    return detections

  def __len__(self):
    return self.num_samples

  def save_results(self, results, save_dir):
    json.dump(self.convert_eval_format(results),
              open('{}/results.json'.format(save_dir), 'w'))

  def run_eval(self, results, save_dir):
    # result_json = os.path.join(save_dir, "results.json")
    # detections = self.convert_eval_format(results)
    # json.dump(detections, open(result_json, "w"))
    self.save_results(results, save_dir)
    coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
    coco_eval = COCOeval(self.coco, coco_dets, "bbox")
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
4,644
39.745614
120
py
SyNet
SyNet-master/CenterNet/src/lib/datasets/dataset/pascal.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import pycocotools.coco as coco import numpy as np import torch import json import os import torch.utils.data as data class PascalVOC(data.Dataset): num_classes = 20 default_resolution = [384, 384] mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 1, 3) std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 1, 3) def __init__(self, opt, split): super(PascalVOC, self).__init__() self.data_dir = os.path.join(opt.data_dir, 'voc') self.img_dir = os.path.join(self.data_dir, 'images') _ann_name = {'train': 'trainval0712', 'val': 'test2007'} self.annot_path = os.path.join( self.data_dir, 'annotations', 'pascal_{}.json').format(_ann_name[split]) self.max_objs = 50 self.class_name = ['__background__', "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] self._valid_ids = np.arange(1, 21, dtype=np.int32) self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)} self._data_rng = np.random.RandomState(123) self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571], dtype=np.float32) self._eig_vec = np.array([ [-0.58752847, -0.69563484, 0.41340352], [-0.5832747, 0.00994535, -0.81221408], [-0.56089297, 0.71832671, 0.41158938] ], dtype=np.float32) self.split = split self.opt = opt print('==> initializing pascal {} data.'.format(_ann_name[split])) self.coco = coco.COCO(self.annot_path) self.images = sorted(self.coco.getImgIds()) self.num_samples = len(self.images) print('Loaded {} {} samples'.format(split, self.num_samples)) def _to_float(self, x): return float("{:.2f}".format(x)) def convert_eval_format(self, all_bboxes): detections = [[[] for __ in range(self.num_samples)] \ for _ in range(self.num_classes + 1)] for i in range(self.num_samples): img_id = self.images[i] for j in range(1, self.num_classes + 1): if isinstance(all_bboxes[img_id][j], np.ndarray): detections[j][i] = all_bboxes[img_id][j].tolist() else: detections[j][i] = all_bboxes[img_id][j] return detections def __len__(self): return self.num_samples def save_results(self, results, save_dir): json.dump(self.convert_eval_format(results), open('{}/results.json'.format(save_dir), 'w')) def run_eval(self, results, save_dir): # result_json = os.path.join(save_dir, "results.json") # detections = self.convert_eval_format(results) # json.dump(detections, open(result_json, "w")) self.save_results(results, save_dir) os.system('python tools/reval.py ' + \ '{}/results.json'.format(save_dir))
3,032
35.542169
80
py
SyNet
SyNet-master/CenterNet/src/lib/datasets/dataset/coco.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import pycocotools.coco as coco from pycocotools.cocoeval import COCOeval import numpy as np import json import os import torch.utils.data as data class COCO(data.Dataset): num_classes = 80 default_resolution = [512, 512] mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32).reshape(1, 1, 3) std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32).reshape(1, 1, 3) def __init__(self, opt, split): super(COCO, self).__init__() self.data_dir = os.path.join(opt.data_dir, 'coco') self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split)) if split == 'test': self.annot_path = os.path.join( self.data_dir, 'annotations', 'image_info_test-dev2017.json').format(split) else: if opt.task == 'exdet': self.annot_path = os.path.join( self.data_dir, 'annotations', 'instances_extreme_{}2017.json').format(split) else: self.annot_path = os.path.join( self.data_dir, 'annotations', 'instances_{}2017.json').format(split) self.max_objs = 128 self.class_name = [ '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'] self._valid_ids = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)} self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \ for v in range(1, self.num_classes + 1)] self._data_rng = np.random.RandomState(123) self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571], dtype=np.float32) self._eig_vec = np.array([ [-0.58752847, -0.69563484, 0.41340352], [-0.5832747, 0.00994535, -0.81221408], [-0.56089297, 0.71832671, 0.41158938] ], dtype=np.float32) # self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3) # self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3) self.split = split self.opt = opt print('==> initializing coco 2017 {} data.'.format(split)) self.coco = coco.COCO(self.annot_path) self.images = self.coco.getImgIds() self.num_samples = len(self.images) print('Loaded {} {} samples'.format(split, self.num_samples)) def _to_float(self, x): return float("{:.2f}".format(x)) def convert_eval_format(self, all_bboxes): # import pdb; pdb.set_trace() detections = [] for image_id in all_bboxes: for cls_ind in all_bboxes[image_id]: category_id = self._valid_ids[cls_ind - 1] for bbox in all_bboxes[image_id][cls_ind]: bbox[2] -= bbox[0] bbox[3] -= bbox[1] score = bbox[4] bbox_out = 
list(map(self._to_float, bbox[0:4])) detection = { "image_id": int(image_id), "category_id": int(category_id), "bbox": bbox_out, "score": float("{:.2f}".format(score)) } if len(bbox) > 5: extreme_points = list(map(self._to_float, bbox[5:13])) detection["extreme_points"] = extreme_points detections.append(detection) return detections def __len__(self): return self.num_samples def save_results(self, results, save_dir): json.dump(self.convert_eval_format(results), open('{}/results.json'.format(save_dir), 'w')) def run_eval(self, results, save_dir): # result_json = os.path.join(save_dir, "results.json") # detections = self.convert_eval_format(results) # json.dump(detections, open(result_json, "w")) self.save_results(results, save_dir) coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir)) coco_eval = COCOeval(self.coco, coco_dets, "bbox") coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize()
5,214
39.115385
78
py
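A sketch of the `results` structure expected by save_results/run_eval above, where `dataset` is assumed to be an instance of the COCO class; the image id and boxes are illustrative only. Keys are image ids, values map a 1-based class index to rows of [x1, y1, x2, y2, score]:

import numpy as np

results = {
    139: {1: np.array([[10., 20., 50., 80., 0.91]], dtype=np.float32),  # one 'person' detection
          2: np.zeros((0, 5), dtype=np.float32)},                       # no 'bicycle' detections
}
dataset.run_eval(results, save_dir='./exp/ctdet_eval')  # writes results.json, then runs COCOeval 'bbox'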
SyNet
SyNet-master/CenterNet/src/lib/utils/post_process.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from .image import transform_preds from .ddd_utils import ddd2locrot def get_pred_depth(depth): return depth def get_alpha(rot): # output: (B, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos, # bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos] # return rot[:, 0] idx = rot[:, 1] > rot[:, 5] alpha1 = np.arctan2(rot[:, 2], rot[:, 3]) + (-0.5 * np.pi) alpha2 = np.arctan2(rot[:, 6], rot[:, 7]) + ( 0.5 * np.pi) return alpha1 * idx + alpha2 * (1 - idx) def ddd_post_process_2d(dets, c, s, opt): # dets: batch x max_dets x dim # return 1-based class det list ret = [] include_wh = dets.shape[2] > 16 for i in range(dets.shape[0]): top_preds = {} dets[i, :, :2] = transform_preds( dets[i, :, 0:2], c[i], s[i], (opt.output_w, opt.output_h)) classes = dets[i, :, -1] for j in range(opt.num_classes): inds = (classes == j) top_preds[j + 1] = np.concatenate([ dets[i, inds, :3].astype(np.float32), get_alpha(dets[i, inds, 3:11])[:, np.newaxis].astype(np.float32), get_pred_depth(dets[i, inds, 11:12]).astype(np.float32), dets[i, inds, 12:15].astype(np.float32)], axis=1) if include_wh: top_preds[j + 1] = np.concatenate([ top_preds[j + 1], transform_preds( dets[i, inds, 15:17], c[i], s[i], (opt.output_w, opt.output_h)) .astype(np.float32)], axis=1) ret.append(top_preds) return ret def ddd_post_process_3d(dets, calibs): # dets: batch x max_dets x dim # return 1-based class det list ret = [] for i in range(len(dets)): preds = {} for cls_ind in dets[i].keys(): preds[cls_ind] = [] for j in range(len(dets[i][cls_ind])): center = dets[i][cls_ind][j][:2] score = dets[i][cls_ind][j][2] alpha = dets[i][cls_ind][j][3] depth = dets[i][cls_ind][j][4] dimensions = dets[i][cls_ind][j][5:8] wh = dets[i][cls_ind][j][8:10] locations, rotation_y = ddd2locrot( center, alpha, dimensions, depth, calibs[0]) bbox = [center[0] - wh[0] / 2, center[1] - wh[1] / 2, center[0] + wh[0] / 2, center[1] + wh[1] / 2] pred = [alpha] + bbox + dimensions.tolist() + \ locations.tolist() + [rotation_y, score] preds[cls_ind].append(pred) preds[cls_ind] = np.array(preds[cls_ind], dtype=np.float32) ret.append(preds) return ret def ddd_post_process(dets, c, s, calibs, opt): # dets: batch x max_dets x dim # return 1-based class det list dets = ddd_post_process_2d(dets, c, s, opt) dets = ddd_post_process_3d(dets, calibs) return dets def ctdet_post_process(dets, c, s, h, w, num_classes): # dets: batch x max_dets x dim # return 1-based class det dict ret = [] for i in range(dets.shape[0]): top_preds = {} dets[i, :, :2] = transform_preds( dets[i, :, 0:2], c[i], s[i], (w, h)) dets[i, :, 2:4] = transform_preds( dets[i, :, 2:4], c[i], s[i], (w, h)) classes = dets[i, :, -1] for j in range(num_classes): inds = (classes == j) top_preds[j + 1] = np.concatenate([ dets[i, inds, :4].astype(np.float32), dets[i, inds, 4:5].astype(np.float32)], axis=1).tolist() ret.append(top_preds) return ret def multi_pose_post_process(dets, c, s, h, w): # dets: batch x max_dets x 40 # return list of 39 in image coord ret = [] for i in range(dets.shape[0]): bbox = transform_preds(dets[i, :, :4].reshape(-1, 2), c[i], s[i], (w, h)) pts = transform_preds(dets[i, :, 5:39].reshape(-1, 2), c[i], s[i], (w, h)) top_preds = np.concatenate( [bbox.reshape(-1, 4), dets[i, :, 4:5], pts.reshape(-1, 34)], axis=1).astype(np.float32).tolist() ret.append({np.ones(1, dtype=np.int32)[0]: top_preds}) return ret
3,958
33.426087
78
py
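A sketch of ctdet_post_process applied to decoder output, mirroring the save_result hooks in the trainers; `dets`, `output`, `batch`, and `num_classes` are assumed to come from a ctdet model and loader:

dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])          # batch x max_dets x 6
dets_out = ctdet_post_process(
    dets.copy(), batch['meta']['c'].cpu().numpy(),
    batch['meta']['s'].cpu().numpy(),
    output['hm'].shape[2], output['hm'].shape[3], num_classes)
# dets_out[0][j] lists [x1, y1, x2, y2, score] boxes for 1-based class j in original image coordinates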
SyNet
SyNet-master/CenterNet/src/lib/utils/image.py
# Modified by Xingyi Zhou from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import cv2 import random def flip(img): return img[:, :, ::-1].copy() def transform_preds(coords, center, scale, output_size): target_coords = np.zeros(coords.shape) trans = get_affine_transform(center, scale, 0, output_size, inv=1) for p in range(coords.shape[0]): target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans) return target_coords def get_affine_transform(center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0): if not isinstance(scale, np.ndarray) and not isinstance(scale, list): scale = np.array([scale, scale], dtype=np.float32) scale_tmp = scale src_w = scale_tmp[0] dst_w = output_size[0] dst_h = output_size[1] rot_rad = np.pi * rot / 180 src_dir = get_dir([0, src_w * -0.5], rot_rad) dst_dir = np.array([0, dst_w * -0.5], np.float32) src = np.zeros((3, 2), dtype=np.float32) dst = np.zeros((3, 2), dtype=np.float32) src[0, :] = center + scale_tmp * shift src[1, :] = center + src_dir + scale_tmp * shift dst[0, :] = [dst_w * 0.5, dst_h * 0.5] dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir src[2:, :] = get_3rd_point(src[0, :], src[1, :]) dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) if inv: trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) else: trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) return trans def affine_transform(pt, t): new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T new_pt = np.dot(t, new_pt) return new_pt[:2] def get_3rd_point(a, b): direct = a - b return b + np.array([-direct[1], direct[0]], dtype=np.float32) def get_dir(src_point, rot_rad): sn, cs = np.sin(rot_rad), np.cos(rot_rad) src_result = [0, 0] src_result[0] = src_point[0] * cs - src_point[1] * sn src_result[1] = src_point[0] * sn + src_point[1] * cs return src_result def crop(img, center, scale, output_size, rot=0): trans = get_affine_transform(center, scale, rot, output_size) dst_img = cv2.warpAffine(img, trans, (int(output_size[0]), int(output_size[1])), flags=cv2.INTER_LINEAR) return dst_img def gaussian_radius(det_size, min_overlap=0.7): height, width = det_size a1 = 1 b1 = (height + width) c1 = width * height * (1 - min_overlap) / (1 + min_overlap) sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1) r1 = (b1 + sq1) / 2 a2 = 4 b2 = 2 * (height + width) c2 = (1 - min_overlap) * width * height sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2) r2 = (b2 + sq2) / 2 a3 = 4 * min_overlap b3 = -2 * min_overlap * (height + width) c3 = (min_overlap - 1) * width * height sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3) r3 = (b3 + sq3) / 2 return min(r1, r2, r3) def gaussian2D(shape, sigma=1): m, n = [(ss - 1.) / 2. 
for ss in shape] y, x = np.ogrid[-m:m+1,-n:n+1] h = np.exp(-(x * x + y * y) / (2 * sigma * sigma)) h[h < np.finfo(h.dtype).eps * h.max()] = 0 return h def draw_umich_gaussian(heatmap, center, radius, k=1): diameter = 2 * radius + 1 gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6) x, y = int(center[0]), int(center[1]) height, width = heatmap.shape[0:2] left, right = min(x, radius), min(width - x, radius + 1) top, bottom = min(y, radius), min(height - y, radius + 1) masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right] if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap) return heatmap def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False): diameter = 2 * radius + 1 gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6) value = np.array(value, dtype=np.float32).reshape(-1, 1, 1) dim = value.shape[0] reg = np.ones((dim, diameter*2+1, diameter*2+1), dtype=np.float32) * value if is_offset and dim == 2: delta = np.arange(diameter*2+1) - radius reg[0] = reg[0] - delta.reshape(1, -1) reg[1] = reg[1] - delta.reshape(-1, 1) x, y = int(center[0]), int(center[1]) height, width = heatmap.shape[0:2] left, right = min(x, radius), min(width - x, radius + 1) top, bottom = min(y, radius), min(height - y, radius + 1) masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] masked_regmap = regmap[:, y - top:y + bottom, x - left:x + right] masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right] masked_reg = reg[:, radius - top:radius + bottom, radius - left:radius + right] if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug idx = (masked_gaussian >= masked_heatmap).reshape( 1, masked_gaussian.shape[0], masked_gaussian.shape[1]) masked_regmap = (1-idx) * masked_regmap + idx * masked_reg regmap[:, y - top:y + bottom, x - left:x + right] = masked_regmap return regmap def draw_msra_gaussian(heatmap, center, sigma): tmp_size = sigma * 3 mu_x = int(center[0] + 0.5) mu_y = int(center[1] + 0.5) w, h = heatmap.shape[0], heatmap.shape[1] ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)] br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)] if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0: return heatmap size = 2 * tmp_size + 1 x = np.arange(0, size, 1, np.float32) y = x[:, np.newaxis] x0 = y0 = size // 2 g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2)) g_x = max(0, -ul[0]), min(br[0], h) - ul[0] g_y = max(0, -ul[1]), min(br[1], w) - ul[1] img_x = max(0, ul[0]), min(br[0], h) img_y = max(0, ul[1]), min(br[1], w) heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum( heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]], g[g_y[0]:g_y[1], g_x[0]:g_x[1]]) return heatmap def grayscale(image): return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) def lighting_(data_rng, image, alphastd, eigval, eigvec): alpha = data_rng.normal(scale=alphastd, size=(3, )) image += np.dot(eigvec, eigval * alpha) def blend_(alpha, image1, image2): image1 *= alpha image2 *= (1 - alpha) image1 += image2 def saturation_(data_rng, image, gs, gs_mean, var): alpha = 1. + data_rng.uniform(low=-var, high=var) blend_(alpha, image, gs[:, :, None]) def brightness_(data_rng, image, gs, gs_mean, var): alpha = 1. + data_rng.uniform(low=-var, high=var) image *= alpha def contrast_(data_rng, image, gs, gs_mean, var): alpha = 1. 
+ data_rng.uniform(low=-var, high=var) blend_(alpha, image, gs_mean) def color_aug(data_rng, image, eig_val, eig_vec): functions = [brightness_, contrast_, saturation_] random.shuffle(functions) gs = grayscale(image) gs_mean = gs.mean() for f in functions: f(data_rng, image, gs, gs_mean, 0.4) lighting_(data_rng, image, 0.1, eig_val, eig_vec)
7,690
32.294372
88
py
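A small sketch of the heatmap utilities above for a single ground-truth box on a 128x128 output grid; the class index and box size are made up for illustration:

import math
import numpy as np

hm = np.zeros((80, 128, 128), dtype=np.float32)                  # one channel per class
h, w = 24.0, 40.0                                                # box height/width on the output grid
radius = max(0, int(gaussian_radius((math.ceil(h), math.ceil(w)))))
ct_int = np.array([64, 40], dtype=np.int32)                      # integer centre (x, y)
draw_umich_gaussian(hm[3], ct_int, radius)                       # splat a gaussian peak for class 3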
SyNet
SyNet-master/CenterNet/src/lib/utils/debugger.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import cv2 from .ddd_utils import compute_box_3d, project_to_image, draw_box_3d class Debugger(object): def __init__(self, ipynb=False, theme='black', num_classes=-1, dataset=None, down_ratio=4): self.ipynb = ipynb if not self.ipynb: import matplotlib.pyplot as plt self.plt = plt self.imgs = {} self.theme = theme colors = [(color_list[_]).astype(np.uint8) \ for _ in range(len(color_list))] self.colors = np.array(colors, dtype=np.uint8).reshape(len(colors), 1, 1, 3) if self.theme == 'white': self.colors = self.colors.reshape(-1)[::-1].reshape(len(colors), 1, 1, 3) self.colors = np.clip(self.colors, 0., 0.6 * 255).astype(np.uint8) self.dim_scale = 1 if dataset == 'coco_hp': self.names = ['p'] self.num_class = 1 self.num_joints = 17 self.edges = [[0, 1], [0, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 6], [5, 7], [7, 9], [6, 8], [8, 10], [5, 11], [6, 12], [11, 12], [11, 13], [13, 15], [12, 14], [14, 16]] self.ec = [(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), (255, 0, 255), (255, 0, 0), (255, 0, 0), (0, 0, 255), (0, 0, 255), (255, 0, 0), (0, 0, 255), (255, 0, 255), (255, 0, 0), (255, 0, 0), (0, 0, 255), (0, 0, 255)] self.colors_hp = [(255, 0, 255), (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255)] elif num_classes == 80 or dataset == 'coco': self.names = coco_class_name elif num_classes == 20 or dataset == 'pascal': self.names = pascal_class_name elif dataset == 'visdrone': self.names=visdrone_class_name elif dataset == 'gta': self.names = gta_class_name self.focal_length = 935.3074360871937 self.W = 1920 self.H = 1080 self.dim_scale = 3 elif dataset == 'viper': self.names = gta_class_name self.focal_length = 1158 self.W = 1920 self.H = 1080 self.dim_scale = 3 elif num_classes == 3 or dataset == 'kitti': self.names = kitti_class_name self.focal_length = 721.5377 self.W = 1242 self.H = 375 num_classes = len(self.names) self.down_ratio=down_ratio # for bird view self.world_size = 64 self.out_size = 384 def add_img(self, img, img_id='default', revert_color=False): if revert_color: img = 255 - img self.imgs[img_id] = img.copy() def add_mask(self, mask, bg, imgId = 'default', trans = 0.8): self.imgs[imgId] = (mask.reshape( mask.shape[0], mask.shape[1], 1) * 255 * trans + \ bg * (1 - trans)).astype(np.uint8) def show_img(self, pause = False, imgId = 'default'): cv2.imshow('{}'.format(imgId), self.imgs[imgId]) if pause: cv2.waitKey() def add_blend_img(self, back, fore, img_id='blend', trans=0.7): if self.theme == 'white': fore = 255 - fore if fore.shape[0] != back.shape[0] or fore.shape[0] != back.shape[1]: fore = cv2.resize(fore, (back.shape[1], back.shape[0])) if len(fore.shape) == 2: fore = fore.reshape(fore.shape[0], fore.shape[1], 1) self.imgs[img_id] = (back * (1. 
- trans) + fore * trans) self.imgs[img_id][self.imgs[img_id] > 255] = 255 self.imgs[img_id][self.imgs[img_id] < 0] = 0 self.imgs[img_id] = self.imgs[img_id].astype(np.uint8).copy() ''' # slow version def gen_colormap(self, img, output_res=None): # num_classes = len(self.colors) img[img < 0] = 0 h, w = img.shape[1], img.shape[2] if output_res is None: output_res = (h * self.down_ratio, w * self.down_ratio) color_map = np.zeros((output_res[0], output_res[1], 3), dtype=np.uint8) for i in range(img.shape[0]): resized = cv2.resize(img[i], (output_res[1], output_res[0])) resized = resized.reshape(output_res[0], output_res[1], 1) cl = self.colors[i] if not (self.theme == 'white') \ else 255 - self.colors[i] color_map = np.maximum(color_map, (resized * cl).astype(np.uint8)) return color_map ''' def gen_colormap(self, img, output_res=None): img = img.copy() c, h, w = img.shape[0], img.shape[1], img.shape[2] if output_res is None: output_res = (h * self.down_ratio, w * self.down_ratio) img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32) colors = np.array( self.colors, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3) if self.theme == 'white': colors = 255 - colors color_map = (img * colors).max(axis=2).astype(np.uint8) color_map = cv2.resize(color_map, (output_res[0], output_res[1])) return color_map ''' # slow def gen_colormap_hp(self, img, output_res=None): # num_classes = len(self.colors) # img[img < 0] = 0 h, w = img.shape[1], img.shape[2] if output_res is None: output_res = (h * self.down_ratio, w * self.down_ratio) color_map = np.zeros((output_res[0], output_res[1], 3), dtype=np.uint8) for i in range(img.shape[0]): resized = cv2.resize(img[i], (output_res[1], output_res[0])) resized = resized.reshape(output_res[0], output_res[1], 1) cl = self.colors_hp[i] if not (self.theme == 'white') else \ (255 - np.array(self.colors_hp[i])) color_map = np.maximum(color_map, (resized * cl).astype(np.uint8)) return color_map ''' def gen_colormap_hp(self, img, output_res=None): c, h, w = img.shape[0], img.shape[1], img.shape[2] if output_res is None: output_res = (h * self.down_ratio, w * self.down_ratio) img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32) colors = np.array( self.colors_hp, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3) if self.theme == 'white': colors = 255 - colors color_map = (img * colors).max(axis=2).astype(np.uint8) color_map = cv2.resize(color_map, (output_res[0], output_res[1])) return color_map def add_rect(self, rect1, rect2, c, conf=1, img_id='default'): cv2.rectangle( self.imgs[img_id], (rect1[0], rect1[1]), (rect2[0], rect2[1]), c, 2) if conf < 1: cv2.circle(self.imgs[img_id], (rect1[0], rect1[1]), int(10 * conf), c, 1) cv2.circle(self.imgs[img_id], (rect2[0], rect2[1]), int(10 * conf), c, 1) cv2.circle(self.imgs[img_id], (rect1[0], rect2[1]), int(10 * conf), c, 1) cv2.circle(self.imgs[img_id], (rect2[0], rect1[1]), int(10 * conf), c, 1) def add_coco_bbox(self, bbox, cat, conf=1, show_txt=True, img_id='default'): bbox = np.array(bbox, dtype=np.int32) # cat = (int(cat) + 1) % 80 cat = int(cat) # print('cat', cat, self.names[cat]) c = self.colors[cat][0][0].tolist() if self.theme == 'white': c = (255 - np.array(c)).tolist() txt = '{}{:.1f}'.format(self.names[cat], conf) font = cv2.FONT_HERSHEY_SIMPLEX cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0] cv2.rectangle( self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), c, 2) if show_txt: cv2.rectangle(self.imgs[img_id], (bbox[0], bbox[1] - cat_size[1] - 2), (bbox[0] + cat_size[0], 
bbox[1] - 2), c, -1) cv2.putText(self.imgs[img_id], txt, (bbox[0], bbox[1] - 2), font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA) def add_coco_hp(self, points, img_id='default'): points = np.array(points, dtype=np.int32).reshape(self.num_joints, 2) for j in range(self.num_joints): cv2.circle(self.imgs[img_id], (points[j, 0], points[j, 1]), 3, self.colors_hp[j], -1) for j, e in enumerate(self.edges): if points[e].min() > 0: cv2.line(self.imgs[img_id], (points[e[0], 0], points[e[0], 1]), (points[e[1], 0], points[e[1], 1]), self.ec[j], 2, lineType=cv2.LINE_AA) def add_points(self, points, img_id='default'): num_classes = len(points) # assert num_classes == len(self.colors) for i in range(num_classes): for j in range(len(points[i])): c = self.colors[i, 0, 0] cv2.circle(self.imgs[img_id], (points[i][j][0] * self.down_ratio, points[i][j][1] * self.down_ratio), 5, (255, 255, 255), -1) cv2.circle(self.imgs[img_id], (points[i][j][0] * self.down_ratio, points[i][j][1] * self.down_ratio), 3, (int(c[0]), int(c[1]), int(c[2])), -1) def show_all_imgs(self, pause=False, time=0): if not self.ipynb: for i, v in self.imgs.items(): cv2.imshow('{}'.format(i), v) if cv2.waitKey(0 if pause else 1) == 27: import sys sys.exit(0) else: self.ax = None nImgs = len(self.imgs) fig=self.plt.figure(figsize=(nImgs * 10,10)) nCols = nImgs nRows = nImgs // nCols for i, (k, v) in enumerate(self.imgs.items()): fig.add_subplot(1, nImgs, i + 1) if len(v.shape) == 3: self.plt.imshow(cv2.cvtColor(v, cv2.COLOR_BGR2RGB)) else: self.plt.imshow(v) self.plt.show() def save_img(self, imgId='default', path='./cache/debug/'): cv2.imwrite(path + '{}.png'.format(imgId), self.imgs[imgId]) def save_all_imgs(self, path='./cache/debug/', prefix='', genID=False): if genID: try: idx = int(np.loadtxt(path + '/id.txt')) except: idx = 0 prefix=idx np.savetxt(path + '/id.txt', np.ones(1) * (idx + 1), fmt='%d') for i, v in self.imgs.items(): cv2.imwrite(path + '/{}{}.png'.format(prefix, i), v) def remove_side(self, img_id, img): if not (img_id in self.imgs): return ws = img.sum(axis=2).sum(axis=0) l = 0 while ws[l] == 0 and l < len(ws): l+= 1 r = ws.shape[0] - 1 while ws[r] == 0 and r > 0: r -= 1 hs = img.sum(axis=2).sum(axis=1) t = 0 while hs[t] == 0 and t < len(hs): t += 1 b = hs.shape[0] - 1 while hs[b] == 0 and b > 0: b -= 1 self.imgs[img_id] = self.imgs[img_id][t:b+1, l:r+1].copy() def project_3d_to_bird(self, pt): pt[0] += self.world_size / 2 pt[1] = self.world_size - pt[1] pt = pt * self.out_size / self.world_size return pt.astype(np.int32) def add_ct_detection( self, img, dets, show_box=False, show_txt=True, center_thresh=0.5, img_id='det'): # dets: max_preds x 5 self.imgs[img_id] = img.copy() if type(dets) == type({}): for cat in dets: for i in range(len(dets[cat])): if dets[cat][i, 2] > center_thresh: cl = (self.colors[cat, 0, 0]).tolist() ct = dets[cat][i, :2].astype(np.int32) if show_box: w, h = dets[cat][i, -2], dets[cat][i, -1] x, y = dets[cat][i, 0], dets[cat][i, 1] bbox = np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2], dtype=np.float32) self.add_coco_bbox( bbox, cat - 1, dets[cat][i, 2], show_txt=show_txt, img_id=img_id) else: for i in range(len(dets)): if dets[i, 2] > center_thresh: # print('dets', dets[i]) cat = int(dets[i, -1]) cl = (self.colors[cat, 0, 0] if self.theme == 'black' else \ 255 - self.colors[cat, 0, 0]).tolist() ct = dets[i, :2].astype(np.int32) * self.down_ratio cv2.circle(self.imgs[img_id], (ct[0], ct[1]), 3, cl, -1) if show_box: w, h = dets[i, -3] * self.down_ratio, dets[i, -2] * self.down_ratio x, 
y = dets[i, 0] * self.down_ratio, dets[i, 1] * self.down_ratio bbox = np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2], dtype=np.float32) self.add_coco_bbox(bbox, dets[i, -1], dets[i, 2], img_id=img_id) def add_3d_detection( self, image_or_path, dets, calib, show_txt=False, center_thresh=0.5, img_id='det'): if isinstance(image_or_path, np.ndarray): self.imgs[img_id] = image_or_path else: self.imgs[img_id] = cv2.imread(image_or_path) for cat in dets: for i in range(len(dets[cat])): cl = (self.colors[cat - 1, 0, 0]).tolist() if dets[cat][i, -1] > center_thresh: dim = dets[cat][i, 5:8] loc = dets[cat][i, 8:11] rot_y = dets[cat][i, 11] # loc[1] = loc[1] - dim[0] / 2 + dim[0] / 2 / self.dim_scale # dim = dim / self.dim_scale if loc[2] > 1: box_3d = compute_box_3d(dim, loc, rot_y) box_2d = project_to_image(box_3d, calib) self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl) def compose_vis_add( self, img_path, dets, calib, center_thresh, pred, bev, img_id='out'): self.imgs[img_id] = cv2.imread(img_path) # h, w = self.imgs[img_id].shape[:2] # pred = cv2.resize(pred, (h, w)) h, w = pred.shape[:2] hs, ws = self.imgs[img_id].shape[0] / h, self.imgs[img_id].shape[1] / w self.imgs[img_id] = cv2.resize(self.imgs[img_id], (w, h)) self.add_blend_img(self.imgs[img_id], pred, img_id) for cat in dets: for i in range(len(dets[cat])): cl = (self.colors[cat - 1, 0, 0]).tolist() if dets[cat][i, -1] > center_thresh: dim = dets[cat][i, 5:8] loc = dets[cat][i, 8:11] rot_y = dets[cat][i, 11] # loc[1] = loc[1] - dim[0] / 2 + dim[0] / 2 / self.dim_scale # dim = dim / self.dim_scale if loc[2] > 1: box_3d = compute_box_3d(dim, loc, rot_y) box_2d = project_to_image(box_3d, calib) box_2d[:, 0] /= hs box_2d[:, 1] /= ws self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl) self.imgs[img_id] = np.concatenate( [self.imgs[img_id], self.imgs[bev]], axis=1) def add_2d_detection( self, img, dets, show_box=False, show_txt=True, center_thresh=0.5, img_id='det'): self.imgs[img_id] = img for cat in dets: for i in range(len(dets[cat])): cl = (self.colors[cat - 1, 0, 0]).tolist() if dets[cat][i, -1] > center_thresh: bbox = dets[cat][i, 1:5] self.add_coco_bbox( bbox, cat - 1, dets[cat][i, -1], show_txt=show_txt, img_id=img_id) def add_bird_view(self, dets, center_thresh=0.3, img_id='bird'): bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230 for cat in dets: cl = (self.colors[cat - 1, 0, 0]).tolist() lc = (250, 152, 12) for i in range(len(dets[cat])): if dets[cat][i, -1] > center_thresh: dim = dets[cat][i, 5:8] loc = dets[cat][i, 8:11] rot_y = dets[cat][i, 11] rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]] for k in range(4): rect[k] = self.project_3d_to_bird(rect[k]) # cv2.circle(bird_view, (rect[k][0], rect[k][1]), 2, lc, -1) cv2.polylines( bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)], True,lc,2,lineType=cv2.LINE_AA) for e in [[0, 1]]: t = 4 if e == [0, 1] else 1 cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]), (rect[e[1]][0], rect[e[1]][1]), lc, t, lineType=cv2.LINE_AA) self.imgs[img_id] = bird_view def add_bird_views(self, dets_dt, dets_gt, center_thresh=0.3, img_id='bird'): alpha = 0.5 bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230 for ii, (dets, lc, cc) in enumerate( [(dets_gt, (12, 49, 250), (0, 0, 255)), (dets_dt, (250, 152, 12), (255, 0, 0))]): # cc = np.array(lc, dtype=np.uint8).reshape(1, 1, 3) for cat in dets: cl = (self.colors[cat - 1, 0, 0]).tolist() for i in range(len(dets[cat])): if dets[cat][i, -1] > center_thresh: dim = 
dets[cat][i, 5:8] loc = dets[cat][i, 8:11] rot_y = dets[cat][i, 11] rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]] for k in range(4): rect[k] = self.project_3d_to_bird(rect[k]) if ii == 0: cv2.fillPoly( bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)], lc,lineType=cv2.LINE_AA) else: cv2.polylines( bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)], True,lc,2,lineType=cv2.LINE_AA) # for e in [[0, 1], [1, 2], [2, 3], [3, 0]]: for e in [[0, 1]]: t = 4 if e == [0, 1] else 1 cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]), (rect[e[1]][0], rect[e[1]][1]), lc, t, lineType=cv2.LINE_AA) self.imgs[img_id] = bird_view kitti_class_name = [ 'p', 'v', 'b' ] gta_class_name = [ 'p', 'v' ] pascal_class_name = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] coco_class_name = [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush' ] visdrone_class_name = ["pedestrian", "people", "bicycle", "car", "van", "truck", "tricycle", "awning-tricycle", "bus", "motor"] color_list = np.array( [ 1.000, 1.000, 1.000, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000, 1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000, 0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000, 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500, 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667, 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333, 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000, 0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, 0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000, 1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286, 0.286, 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714, 
0.714, 0.857, 0.857, 0.857, 0.000, 0.447, 0.741, 0.50, 0.5, 0 ] ).astype(np.float32) color_list = color_list.reshape((-1, 3)) * 255
21,425
38.09854
127
py
SyNet
SyNet-master/CenterNet/src/lib/utils/oracle_utils.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import numba


@numba.jit(nopython=True, nogil=True)
def gen_oracle_map(feat, ind, w, h):
    # feat: B x maxN x featDim
    # ind: B x maxN
    batch_size = feat.shape[0]
    max_objs = feat.shape[1]
    feat_dim = feat.shape[2]
    out = np.zeros((batch_size, feat_dim, h, w), dtype=np.float32)
    vis = np.zeros((batch_size, h, w), dtype=np.uint8)
    ds = [(0, 1), (0, -1), (1, 0), (-1, 0)]
    for i in range(batch_size):
        queue_ind = np.zeros((h * w * 2, 2), dtype=np.int32)
        queue_feat = np.zeros((h * w * 2, feat_dim), dtype=np.float32)
        head, tail = 0, 0
        for j in range(max_objs):
            if ind[i][j] > 0:
                x, y = ind[i][j] % w, ind[i][j] // w
                out[i, :, y, x] = feat[i][j]
                vis[i, y, x] = 1
                queue_ind[tail] = x, y
                queue_feat[tail] = feat[i][j]
                tail += 1
        while tail - head > 0:
            x, y = queue_ind[head]
            f = queue_feat[head]
            head += 1
            for (dx, dy) in ds:
                xx, yy = x + dx, y + dy
                if xx >= 0 and yy >= 0 and xx < w and yy < h and vis[i, yy, xx] < 1:
                    out[i, :, yy, xx] = f
                    vis[i, yy, xx] = 1
                    queue_ind[tail] = xx, yy
                    queue_feat[tail] = f
                    tail += 1
    return out
1,317
30.380952
76
py
SyNet
SyNet-master/CenterNet/src/lib/utils/utils.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import torch


class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        if self.count > 0:
            self.avg = self.sum / self.count
542
22.608696
59
py
SyNet
SyNet-master/CenterNet/src/lib/utils/ddd_utils.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import cv2


def compute_box_3d(dim, location, rotation_y):
    # dim: 3
    # location: 3
    # rotation_y: 1
    # return: 8 x 3
    c, s = np.cos(rotation_y), np.sin(rotation_y)
    R = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]], dtype=np.float32)
    l, w, h = dim[2], dim[1], dim[0]
    x_corners = [l/2, l/2, -l/2, -l/2, l/2, l/2, -l/2, -l/2]
    y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
    z_corners = [w/2, -w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2]
    corners = np.array([x_corners, y_corners, z_corners], dtype=np.float32)
    corners_3d = np.dot(R, corners)
    corners_3d = corners_3d + np.array(location, dtype=np.float32).reshape(3, 1)
    return corners_3d.transpose(1, 0)


def project_to_image(pts_3d, P):
    # pts_3d: n x 3
    # P: 3 x 4
    # return: n x 2
    pts_3d_homo = np.concatenate(
        [pts_3d, np.ones((pts_3d.shape[0], 1), dtype=np.float32)], axis=1)
    pts_2d = np.dot(P, pts_3d_homo.transpose(1, 0)).transpose(1, 0)
    pts_2d = pts_2d[:, :2] / pts_2d[:, 2:]
    # import pdb; pdb.set_trace()
    return pts_2d


def compute_orientation_3d(dim, location, rotation_y):
    # dim: 3
    # location: 3
    # rotation_y: 1
    # return: 2 x 3
    c, s = np.cos(rotation_y), np.sin(rotation_y)
    R = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]], dtype=np.float32)
    orientation_3d = np.array([[0, dim[2]], [0, 0], [0, 0]], dtype=np.float32)
    orientation_3d = np.dot(R, orientation_3d)
    orientation_3d = orientation_3d + \
        np.array(location, dtype=np.float32).reshape(3, 1)
    return orientation_3d.transpose(1, 0)


def draw_box_3d(image, corners, c=(0, 0, 255)):
    face_idx = [[0, 1, 5, 4],
                [1, 2, 6, 5],
                [2, 3, 7, 6],
                [3, 0, 4, 7]]
    for ind_f in range(3, -1, -1):
        f = face_idx[ind_f]
        for j in range(4):
            cv2.line(image, (corners[f[j], 0], corners[f[j], 1]),
                     (corners[f[(j+1) % 4], 0], corners[f[(j+1) % 4], 1]), c, 2,
                     lineType=cv2.LINE_AA)
        if ind_f == 0:
            cv2.line(image, (corners[f[0], 0], corners[f[0], 1]),
                     (corners[f[2], 0], corners[f[2], 1]), c, 1, lineType=cv2.LINE_AA)
            cv2.line(image, (corners[f[1], 0], corners[f[1], 1]),
                     (corners[f[3], 0], corners[f[3], 1]), c, 1, lineType=cv2.LINE_AA)
    return image


def unproject_2d_to_3d(pt_2d, depth, P):
    # pts_2d: 2
    # depth: 1
    # P: 3 x 4
    # return: 3
    z = depth - P[2, 3]
    x = (pt_2d[0] * depth - P[0, 3] - P[0, 2] * z) / P[0, 0]
    y = (pt_2d[1] * depth - P[1, 3] - P[1, 2] * z) / P[1, 1]
    pt_3d = np.array([x, y, z], dtype=np.float32)
    return pt_3d


def alpha2rot_y(alpha, x, cx, fx):
    """
    Get rotation_y by alpha + theta - 180
    alpha : Observation angle of object, ranging [-pi..pi]
    x : Object center x to the camera center (x-W/2), in pixels
    rotation_y : Rotation ry around Y-axis in camera coordinates [-pi..pi]
    """
    rot_y = alpha + np.arctan2(x - cx, fx)
    if rot_y > np.pi:
        rot_y -= 2 * np.pi
    if rot_y < -np.pi:
        rot_y += 2 * np.pi
    return rot_y


def rot_y2alpha(rot_y, x, cx, fx):
    """
    Get rotation_y by alpha + theta - 180
    alpha : Observation angle of object, ranging [-pi..pi]
    x : Object center x to the camera center (x-W/2), in pixels
    rotation_y : Rotation ry around Y-axis in camera coordinates [-pi..pi]
    """
    alpha = rot_y - np.arctan2(x - cx, fx)
    if alpha > np.pi:
        alpha -= 2 * np.pi
    if alpha < -np.pi:
        alpha += 2 * np.pi
    return alpha


def ddd2locrot(center, alpha, dim, depth, calib):
    # single image
    locations = unproject_2d_to_3d(center, depth, calib)
    locations[1] += dim[0] / 2
    rotation_y = alpha2rot_y(alpha, center[0], calib[0, 2], calib[0, 0])
    return locations, rotation_y


def project_3d_bbox(location, dim, rotation_y, calib):
    box_3d = compute_box_3d(dim, location, rotation_y)
    box_2d = project_to_image(box_3d, calib)
    return box_2d


if __name__ == '__main__':
    calib = np.array(
        [[7.070493000000e+02, 0.000000000000e+00, 6.040814000000e+02, 4.575831000000e+01],
         [0.000000000000e+00, 7.070493000000e+02, 1.805066000000e+02, -3.454157000000e-01],
         [0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 4.981016000000e-03]],
        dtype=np.float32)
    alpha = -0.20
    tl = np.array([712.40, 143.00], dtype=np.float32)
    br = np.array([810.73, 307.92], dtype=np.float32)
    ct = (tl + br) / 2
    rotation_y = 0.01
    print('alpha2rot_y', alpha2rot_y(alpha, ct[0], calib[0, 2], calib[0, 0]))
    print('rotation_y', rotation_y)
4,548
33.725191
92
py
SyNet
SyNet-master/CenterNet/data/example.py
import os
from xml.etree.ElementTree import dump
import json
import pprint
import argparse

from Format import VOC, COCO, UDACITY, KITTI, YOLO

parser = argparse.ArgumentParser(description='label Converting example.')
parser.add_argument('--datasets', type=str, help='type of datasets')
parser.add_argument('--img_path', type=str, help='directory of image folder')
parser.add_argument('--label', type=str, help='directory of label folder or label file path')
parser.add_argument('--convert_output_path', type=str, help='directory of label folder')
parser.add_argument('--img_type', type=str, help='type of image')
parser.add_argument('--manipast_path', type=str, help='directory of manipast file', default="./")
parser.add_argument('--cls_list_file', type=str, help='directory of *.names file', default="./")

args = parser.parse_args()


def main(config):
    if config["datasets"] == "VOC":
        voc = VOC()
        yolo = YOLO(os.path.abspath(config["cls_list"]))
        flag, data = voc.parse(config["label"])
        if flag == True:
            flag, data = yolo.generate(data)
            if flag == True:
                flag, data = yolo.save(data, config["output_path"], config["img_path"],
                                       config["img_type"], config["manipast_path"])
                if flag == False:
                    print("Saving Result : {}, msg : {}".format(flag, data))
            else:
                print("YOLO Generating Result : {}, msg : {}".format(flag, data))
        else:
            print("VOC Parsing Result : {}, msg : {}".format(flag, data))

    elif config["datasets"] == "COCO":
        coco = COCO()
        yolo = YOLO(os.path.abspath(config["cls_list"]))
        flag, data = coco.parse(config["label"])
        if flag == True:
            flag, data = yolo.generate(data)
            if flag == True:
                flag, data = yolo.save(data, config["output_path"], config["img_path"],
                                       config["img_type"], config["manipast_path"])
                if flag == False:
                    print("Saving Result : {}, msg : {}".format(flag, data))
            else:
                print("YOLO Generating Result : {}, msg : {}".format(flag, data))
        else:
            print("COCO Parsing Result : {}, msg : {}".format(flag, data))

    elif config["datasets"] == "UDACITY":
        udacity = UDACITY()
        yolo = YOLO(os.path.abspath(config["cls_list"]))
        flag, data = udacity.parse(config["label"])
        if flag == True:
            flag, data = yolo.generate(data)
            if flag == True:
                flag, data = yolo.save(data, config["output_path"], config["img_path"],
                                       config["img_type"], config["manipast_path"])
                if flag == False:
                    print("Saving Result : {}, msg : {}".format(flag, data))
            else:
                print("UDACITY Generating Result : {}, msg : {}".format(flag, data))
        else:
            print("COCO Parsing Result : {}, msg : {}".format(flag, data))

    elif config["datasets"] == "KITTI":
        kitti = KITTI()
        yolo = YOLO(os.path.abspath(config["cls_list"]))
        flag, data = kitti.parse(config["label"], config["img_path"], img_type=config["img_type"])
        if flag == True:
            flag, data = yolo.generate(data)
            if flag == True:
                flag, data = yolo.save(data, config["output_path"], config["img_path"],
                                       config["img_type"], config["manipast_path"])
                if flag == False:
                    print("Saving Result : {}, msg : {}".format(flag, data))
            else:
                print("YOLO Generating Result : {}, msg : {}".format(flag, data))
        else:
            print("KITTI Parsing Result : {}, msg : {}".format(flag, data))

    else:
        print("Unkwon Datasets")


if __name__ == '__main__':
    config = {
        "datasets": args.datasets,
        "img_path": args.img_path,
        "label": args.label,
        "img_type": args.img_type,
        "manipast_path": args.manipast_path,
        "output_path": args.convert_output_path,
        "cls_list": args.cls_list_file,
    }
    main(config)
4,305
31.37594
98
py
SyNet
SyNet-master/CenterNet/data/msgLogInfo.py
class color:
    BOLD = '\033[1m'
    END = '\033[0m'
    DEFAULT = '\033[0;37;40m'
    RED = '\033[91m'
104
20
29
py
SyNet
SyNet-master/CenterNet/data/label_visualization.py
import os
import argparse
import time
import pprint

from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import json

from Format import VOC, COCO, UDACITY, KITTI, YOLO

parser = argparse.ArgumentParser(description='Evaluate label Converting.')
parser.add_argument('--datasets', type=str, help='type of datasets')
parser.add_argument('--img_path', type=str, help='directory of image folder')
parser.add_argument('--label_path', type=str, help='directory of label folder')
parser.add_argument('--img_type', type=str, help='type of image', default='.jpg')
parser.add_argument('--cls_list_file', type=str, help='directory of *.names file', default="./")

args = parser.parse_args()


def main():
    pp = pprint.PrettyPrinter(indent=4)

    img_path = args.img_path
    label_path = args.label_path
    img_type = args.img_type
    datasets = args.datasets
    cls_list = args.cls_list_file

    result = None
    data = None

    if datasets == "COCO":
        coco = COCO()
        result, data = coco.parse(label_path)
    elif datasets == "VOC":
        voc = VOC()
        result, data = voc.parse(label_path)
    elif datasets == "UDACITY":
        udacity = UDACITY()
        result, data = udacity.parse(label_path, img_path)
    elif datasets == "KITTI":
        kitti = KITTI()
        result, data = kitti.parse(label_path, img_path, img_type=img_type)
    elif datasets == "YOLO":
        yolo = YOLO(os.path.abspath(cls_list))
        result, data = yolo.parse(label_path, img_path, img_type=img_type)

    if result is True:
        for key in data:
            filepath = "".join([img_path, key, img_type])
            im = Image.open(filepath)
            draw = ImageDraw.Draw(im)

            print("data['{}']: ".format(key), end="")
            pp.pprint(data[key])
            print("num_object : {}".format(data[key]["objects"]["num_obj"]))

            for idx in range(0, int(data[key]["objects"]["num_obj"])):
                print("idx {}, name : {}, bndbox :{}".format(
                    idx, data[key]["objects"][str(idx)]["name"],
                    data[key]["objects"][str(idx)]["bndbox"]))

                x0 = data[key]["objects"][str(idx)]["bndbox"]["xmin"]
                y0 = data[key]["objects"][str(idx)]["bndbox"]["ymin"]
                x1 = data[key]["objects"][str(idx)]["bndbox"]["xmax"]
                y1 = data[key]["objects"][str(idx)]["bndbox"]["ymax"]

                draw.rectangle(((x0, y0), (x1, y1)), outline='#00ff88')
                draw.text((x0, y0), data[key]["objects"][str(idx)]["name"])

            del draw
            print("===============================================================================================\n\n")

            plt.imshow(im)
            plt.show()
            plt.clf()
            im.close()
    else:
        print("return value : {}, msg : {}, args: {}".format(result, data, args))


if __name__ == '__main__':
    main()
2,912
32.872093
148
py
SyNet
SyNet-master/Ensemble/converFastCOCO.py
import json
import pickle as pk
import numpy as np
from ensemble_boxes import *

# tempDict = next(item for item in out if item['image_id'] == 1 and item['score']>confidence)
numVal = 5000
numTrain = 191961
confidence = 0.001

with open('COCOVAL.json', 'r') as f:
    out = json.load(f)
with open('centerval.json', 'r') as f:
    out2 = json.load(f)

# boxesYOLO = pk.load(open('val_boxes.pkl','rb'))
# labelsYOLO = pk.load(open('val_labels.pkl','rb'))
# scoresYOLO = pk.load(open('val_scores.pkl','rb'))

boxesfast = []
labelsfast = []
scoresfast = []
idsfast = []
boxescent = []
labelscent = []
scorescent = []
idscent = []
index = 0
index2 = 0
tempID = out[index]['image_id']
temptemp = 0
allids = []

for i in range(5000):
    check = True
    tempBoxes = []
    tempLabels = []
    tempScores = []
    tempIds = []
    tempBoxes2 = []
    tempLabels2 = []
    tempScores2 = []
    tempIds2 = []
    while (check and index < len(out)):
        item = out[index]
        if item['image_id'] == tempID:
            print('Sample ' + str(tempID))
            tempBBOX = item['bbox']
            tempBBOX[2] += tempBBOX[0]
            tempBBOX[3] += tempBBOX[1]
            tempBoxes.append(tempBBOX)
            tempScores.append(item['score'])
            tempLabels.append(item['category_id'] - 1)
            tempIds.append(item['image_id'])
            temptemp = tempID
            index += 1
        else:
            allids.append(tempID)
            tempID = out[index]['image_id']
            check = False
    boxesfast.append(np.asarray(tempBoxes))
    labelsfast.append(np.asarray(tempLabels))
    scoresfast.append(np.asarray(tempScores))
    idsfast.append(np.asarray(tempIds))

    # tmp = 0
    # check = True
    # while (check and tmp < len(out)):
    #     item = out2[tmp]
    #     if item['image_id'] == temptemp:
    #         check = False
    #     else:
    #         tmp += 1
    # check = True
    # while (check and tmp < len(out)):
    #     item = out2[tmp]
    #     if item['image_id'] == temptemp:
    #         tempBBOX = item['bbox']
    #         tempBBOX[2] += tempBBOX[0]
    #         tempBBOX[3] += tempBBOX[1]
    #         tempBoxes2.append(tempBBOX)
    #         tempScores2.append(item['score'])
    #         tempLabels2.append(item['category_id'] - 1)
    #         tempIds2.append(item['image_id'])
    #         tmp += 1
    #     else:
    #         check = False
    #
    # boxescent.append(np.asarray(tempBoxes2))
    # labelscent.append(np.asarray(tempLabels2))
    # scorescent.append(np.asarray(tempScores2))
    # idscent.append(np.asarray(tempIds2))

with open('fastcocobox.pkl', 'wb') as f:
    pk.dump(boxesfast, f)
with open('fastcocolab.pkl', 'wb') as f:
    pk.dump(labelsfast, f)
with open('fastcocosco.pkl', 'wb') as f:
    pk.dump(scoresfast, f)
with open('fastcocoid.pkl', 'wb') as f:
    pk.dump(idsfast, f)

# with open('centcocobox.pkl', 'wb') as f:
#     pk.dump(boxescent, f)
# with open('centcocolab.pkl', 'wb') as f:
#     pk.dump(labelscent, f)
# with open('centcocosco.pkl', 'wb') as f:
#     pk.dump(scorescent, f)
# with open('centcocoid.pkl', 'wb') as f:
#     pk.dump(idscent, f)

with open('allids.pkl', 'wb') as f:
    pk.dump(allids, f)

a = 5
3,227
27.315789
97
py
SyNet
SyNet-master/Ensemble/convertCENTER.py
import json
import pickle as pk
import numpy as np
from ensemble_boxes import *

# tempDict = next(item for item in out if item['image_id'] == 1 and item['score']>confidence)
numVal = 548
numTrain = 191961
confidence = 0.001

with open('results.json', 'r') as f:
    out = json.load(f)

# boxesYOLO = pk.load(open('val_boxes.pkl','rb'))
# labelsYOLO = pk.load(open('val_labels.pkl','rb'))
# scoresYOLO = pk.load(open('val_scores.pkl','rb'))

boxesCSC = []
labelsCSC = []
scoresCSC = []
index = 0

for i in range(548):
    tempID = i + 1
    check = True
    tempBoxes = []
    tempLabels = []
    tempScores = []
    while (check and index < len(out)):
        item = out[index]
        if item['image_id'] == tempID:
            print('Sample ' + str(tempID))
            tempBBOX = item['bbox']
            tempBBOX[2] += tempBBOX[0]
            tempBBOX[3] += tempBBOX[1]
            tempBoxes.append(tempBBOX)
            tempScores.append(item['score'])
            tempLabels.append(item['category_id'] - 1)
            index += 1
        else:
            check = False
    boxesCSC.append(np.asarray(tempBoxes))
    labelsCSC.append(np.asarray(tempLabels))
    scoresCSC.append(np.asarray(tempScores))

with open('val_boxes_center.pkl', 'wb') as f:
    pk.dump(boxesCSC, f)
with open('val_labels_center.pkl', 'wb') as f:
    pk.dump(labelsCSC, f)
with open('val_scores_center.pkl', 'wb') as f:
    pk.dump(scoresCSC, f)

a = 5
1,434
26.596154
97
py
SyNet
SyNet-master/Ensemble/cocoBBOX.py
from ensemble_boxes import *
import pickle as pk
import numpy as np

numVal = 5000
numTrain = 191961

boxesCSC50 = pk.load(open('centcocobox.pkl', 'rb'))
labelsCSC50 = pk.load(open('centcocolab.pkl', 'rb'))
scoresCSC50 = pk.load(open('centcocosco.pkl', 'rb'))
idsc = pk.load(open('centcocoid.pkl', 'rb'))
boxesCSC101 = pk.load(open('fastcocobox.pkl', 'rb'))
labelsCSC101 = pk.load(open('fastcocolab.pkl', 'rb'))
scoresCSC101 = pk.load(open('fastcocosco.pkl', 'rb'))
idsf = pk.load(open('fastcocoid.pkl', 'rb'))

for i in range(len(boxesCSC50)):
    boxesCSC50[i] = np.ndarray.tolist(boxesCSC50[i])
    labelsCSC50[i] = np.ndarray.tolist(labelsCSC50[i])
    scoresCSC50[i] = np.ndarray.tolist(scoresCSC50[i])

for i in range(len(boxesCSC101)):
    boxesCSC101[i] = np.ndarray.tolist(boxesCSC101[i])
    labelsCSC101[i] = np.ndarray.tolist(labelsCSC101[i])
    scoresCSC101[i] = np.ndarray.tolist(scoresCSC101[i])

weights = [1, 1]
weights2 = [5, 1]
threshiou = 0.55
threshlow = 0.025
boxesYC = []
scoresYC = []
labelsYC = []
boxesCC = []
scoresCC = []
labelsCC = []

# for i in range(numVal):
#     print(i)
#     boxes1, scores1, labels1 = weighted_boxes_fusion([boxesCSC101[i], boxesYOLO[i]], [scoresCSC101[i], scoresYOLO[i]],
#                                                      [labelsCSC101[i], labelsYOLO[i]], weights=weights, iou_thr=threshiou,
#                                                      skip_box_thr=threshlow)
#     # boxes2, scores2, labels2 = getBBOX([boxesCSC101[i], boxesYOLO[i]], [scoresCSC101[i], scoresYOLO[i]],
#     #                                    [labelsCSC101[i], labelsYOLO[i]], w=weights, threshiou=threshiou,
#     #                                    threshlow=threshlow)
#     boxesYC.append(boxes1)
#     scoresYC.append(scores1)
#     labelsYC.append(labels1)
#     # boxesYC.append(boxes1)
#     # scoresYC.append(scores1)
#     # labelsYC.append(labels1)

for i in range(numVal):
    print(i)
    boxes2, scores2, labels2 = weighted_boxes_fusion([boxesCSC101[i], boxesCSC50[i]], [scoresCSC101[i], scoresCSC50[i]],
                                                     [labelsCSC101[i], labelsCSC50[i]], weights=weights, iou_thr=threshiou,
                                                     skip_box_thr=threshlow)
    # boxes2, scores2, labels2 = getBBOX([boxesCSC101[i], boxesYOLO[i]], [boxesCSC50[i], scoresCSC50[i]],
    #                                    [labelsCSC101[i], labelsCSC50[i]], w=weights, threshiou=threshiou,
    #                                    threshlow=threshlow)
    # boxesCC.append(boxes2)
    # scoresCC.append(scores2)
    # labelsCC.append(labels2)
    boxesCC.append(boxes2)
    scoresCC.append(scores2)
    labelsCC.append(labels2)

# with open('val_boxes_YC.pkl', 'wb') as f:
#     pk.dump(boxesYC, f)
# with open('val_labels_YC.pkl', 'wb') as f:
#     pk.dump(labelsYC, f)
# with open('val_scores_YC.pkl', 'wb') as f:
#     pk.dump(scoresYC, f)

with open('val_boxes_CC.pkl', 'wb') as f:
    pk.dump(boxesCC, f)
with open('val_labels_CC.pkl', 'wb') as f:
    pk.dump(labelsCC, f)
with open('val_scores_CC.pkl', 'wb') as f:
    pk.dump(scoresCC, f)

deneme = 0
3,254
35.166667
124
py
SyNet
SyNet-master/tensorpack/setup.py
from os import path
import setuptools
from setuptools import setup, find_packages

version = int(setuptools.__version__.split('.')[0])
assert version > 30, "Tensorpack installation requires setuptools > 30"

this_directory = path.abspath(path.dirname(__file__))

# setup metainfo
libinfo_py = path.join(this_directory, 'tensorpack', 'libinfo.py')
libinfo_content = open(libinfo_py, "r").readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][0]
exec(version_line)  # produce __version__

with open(path.join(this_directory, 'README.md'), 'rb') as f:
    long_description = f.read().decode('utf-8')


def add_git_version():

    def get_git_version():
        from subprocess import check_output
        try:
            return check_output("git describe --tags --long --dirty".split()).decode('utf-8').strip()
        except Exception:
            return __version__  # noqa

    newlibinfo_content = [l for l in libinfo_content if not l.startswith('__git_version__')]
    newlibinfo_content.append('__git_version__ = "{}"'.format(get_git_version()))
    with open(libinfo_py, "w") as f:
        f.write("".join(newlibinfo_content))


add_git_version()


setup(
    name='tensorpack',
    author="TensorPack contributors",
    author_email="ppwwyyxxc@gmail.com",
    url="https://github.com/tensorpack/tensorpack",
    keywords="tensorflow, deep learning, neural network",
    license="Apache",

    version=__version__,   # noqa
    description='A Neural Network Training Interface on TensorFlow',
    long_description=long_description,
    long_description_content_type='text/markdown',

    packages=find_packages(exclude=["examples", "tests"]),
    zip_safe=False,  # dataset and __init__ use file

    install_requires=[
        "numpy>=1.14",
        "six",
        "termcolor>=1.1",
        "tabulate>=0.7.7",
        "tqdm>4.29.0",
        "msgpack>=0.5.2",
        "msgpack-numpy>=0.4.4.2",
        "pyzmq>=16",
        "psutil>=5",
    ],
    tests_require=['flake8', 'scikit-image'],
    extras_require={
        'all': ['scipy', 'h5py', 'lmdb>=0.92', 'matplotlib', 'scikit-learn'],
        'all: "linux" in sys_platform': ['python-prctl'],
    },

    # https://packaging.python.org/guides/distributing-packages-using-setuptools/#universal-wheels
    options={'bdist_wheel': {'universal': '1'}},
)
2,356
30.851351
101
py
SyNet
SyNet-master/tensorpack/examples/boilerplate.py
import argparse
import os

import tensorflow as tf

from tensorpack import *

"""
This is a boiler-plate template.
All code is in this file is the most minimalistic way
to solve a deep-learning problem with cross-validation.
"""

BATCH_SIZE = 16
SHAPE = 28
CHANNELS = 3


class Model(ModelDesc):
    def inputs(self):
        return [tf.TensorSpec((None, SHAPE, SHAPE, CHANNELS), tf.float32, 'input1'),
                tf.TensorSpec((None,), tf.int32, 'input2')]

    def build_graph(self, input1, input2):
        cost = tf.identity(input1 - input2, name='total_costs')
        summary.add_moving_summary(cost)
        return cost

    def optimizer(self):
        lr = tf.get_variable('learning_rate', initializer=5e-3, trainable=False)
        return tf.train.AdamOptimizer(lr)


def get_data(subset):
    # something that yields [[SHAPE, SHAPE, CHANNELS], [1]]
    ds = FakeData([[SHAPE, SHAPE, CHANNELS], [1]], 1000, random=False,
                  dtype=['float32', 'uint8'], domain=[(0, 255), (0, 10)])
    ds = MultiProcessRunnerZMQ(ds, 2)
    ds = BatchData(ds, BATCH_SIZE)
    return ds


def get_config():
    logger.auto_set_dir()

    ds_train = get_data('train')
    ds_test = get_data('test')

    return TrainConfig(
        model=Model(),
        data=QueueInput(ds_train),
        callbacks=[
            ModelSaver(),
            InferenceRunner(ds_test, [ScalarStats('total_costs')]),
        ],
        steps_per_epoch=len(ds_train),
        max_epoch=100,
    )


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', help='load model')
    args = parser.parse_args()

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    config = get_config()
    config.session_init = SmartInit(args.load)

    launch_train_with_config(config, SimpleTrainer())
1,979
25.052632
109
py
SyNet
SyNet-master/tensorpack/examples/basics/cifar-convnet.py
# File: cifar-convnet.py import argparse import os import tensorflow as tf from tensorpack import * from tensorpack.dataflow import dataset from tensorpack.tfutils.summary import * from tensorpack.utils.gpu import get_num_gpu """ A small convnet model for Cifar10 or Cifar100 dataset. Cifar10 trained on 1 GPU: 91% accuracy after 50k iterations. 79 itr/s on P100 Not a good model for Cifar100, just for demonstration. """ class Model(ModelDesc): def __init__(self, cifar_classnum): super(Model, self).__init__() self.cifar_classnum = cifar_classnum def inputs(self): return [tf.TensorSpec((None, 30, 30, 3), tf.float32, 'input'), tf.TensorSpec((None,), tf.int32, 'label')] def build_graph(self, image, label): drop_rate = tf.constant(0.5 if self.training else 0.0) if self.training: tf.summary.image("train_image", image, 10) if tf.test.is_gpu_available(): image = tf.transpose(image, [0, 3, 1, 2]) data_format = 'channels_first' else: data_format = 'channels_last' image = image / 4.0 # just to make range smaller with argscope(Conv2D, activation=BNReLU, use_bias=False, kernel_size=3), \ argscope([Conv2D, MaxPooling, BatchNorm], data_format=data_format): logits = LinearWrap(image) \ .Conv2D('conv1.1', filters=64) \ .Conv2D('conv1.2', filters=64) \ .MaxPooling('pool1', 3, stride=2, padding='SAME') \ .Conv2D('conv2.1', filters=128) \ .Conv2D('conv2.2', filters=128) \ .MaxPooling('pool2', 3, stride=2, padding='SAME') \ .Conv2D('conv3.1', filters=128, padding='VALID') \ .Conv2D('conv3.2', filters=128, padding='VALID') \ .FullyConnected('fc0', 1024 + 512, activation=tf.nn.relu) \ .Dropout(rate=drop_rate) \ .FullyConnected('fc1', 512, activation=tf.nn.relu) \ .FullyConnected('linear', out_dim=self.cifar_classnum)() cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss') correct = tf.cast(tf.nn.in_top_k(predictions=logits, targets=label, k=1), tf.float32, name='correct') # monitor training error add_moving_summary(tf.reduce_mean(correct, name='accuracy')) # weight decay on all W of fc layers wd_cost = regularize_cost('fc.*/W', l2_regularizer(4e-4), name='regularize_loss') add_moving_summary(cost, wd_cost) add_param_summary(('.*/W', ['histogram'])) # monitor W return tf.add_n([cost, wd_cost], name='cost') def optimizer(self): lr = tf.get_variable('learning_rate', initializer=1e-2, trainable=False) tf.summary.scalar('lr', lr) return tf.train.AdamOptimizer(lr, epsilon=1e-3) def get_data(train_or_test, cifar_classnum): isTrain = train_or_test == 'train' if cifar_classnum == 10: ds = dataset.Cifar10(train_or_test) else: ds = dataset.Cifar100(train_or_test) if isTrain: augmentors = [ imgaug.RandomCrop((30, 30)), imgaug.Flip(horiz=True), imgaug.Brightness(63), imgaug.Contrast((0.2, 1.8)), imgaug.MeanVarianceNormalize(all_channel=True) ] else: augmentors = [ imgaug.CenterCrop((30, 30)), imgaug.MeanVarianceNormalize(all_channel=True) ] ds = AugmentImageComponent(ds, augmentors) ds = BatchData(ds, 128, remainder=not isTrain) if isTrain: ds = MultiProcessRunnerZMQ(ds, 5) return ds def get_config(cifar_classnum): # prepare dataset dataset_train = get_data('train', cifar_classnum) dataset_test = get_data('test', cifar_classnum) def lr_func(lr): if lr < 3e-5: raise StopTraining() return lr * 0.31 return TrainConfig( model=Model(cifar_classnum), data=QueueInput(dataset_train), callbacks=[ ModelSaver(), InferenceRunner(dataset_test, ScalarStats(['accuracy', 'cost'])), StatMonitorParamSetter('learning_rate', 'validation_accuracy', lr_func, 
threshold=0.001, last_k=10, reverse=True), ], max_epoch=150, ) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') parser.add_argument('--load', help='load model') parser.add_argument('--classnum', help='10 for cifar10 or 100 for cifar100', type=int, default=10) args = parser.parse_args() if args.gpu: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu with tf.Graph().as_default(): logger.set_logger_dir(os.path.join('train_log', 'cifar' + str(args.classnum))) config = get_config(args.classnum) config.session_init = SmartInit(args.load) num_gpu = get_num_gpu() trainer = SimpleTrainer() if num_gpu <= 1 \ else SyncMultiGPUTrainerParameterServer(num_gpu) launch_train_with_config(config, trainer)
5,396
34.506579
109
py
SyNet
SyNet-master/tensorpack/examples/basics/svhn-digit-convnet.py
# File: svhn-digit-convnet.py import argparse import os import tensorflow as tf from tensorpack import * from tensorpack.dataflow import dataset from tensorpack.tfutils.summary import * """ A very small SVHN convnet model (only 0.8m parameters). About 2.3% validation error after 70 epochs. 2.15% after 150 epochs. Each epoch iterates over the whole training set (4721 iterations), and takes about 24s on a P100. """ class Model(ModelDesc): def inputs(self): return [tf.TensorSpec([None, 40, 40, 3], tf.float32, 'input'), tf.TensorSpec([None], tf.int32, 'label')] def build_graph(self, image, label): image = image / 128.0 - 1 with argscope(Conv2D, activation=BNReLU, use_bias=False): logits = (LinearWrap(image) .Conv2D('conv1', 24, 5, padding='VALID') .MaxPooling('pool1', 2, padding='SAME') .Conv2D('conv2', 32, 3, padding='VALID') .Conv2D('conv3', 32, 3, padding='VALID') .MaxPooling('pool2', 2, padding='SAME') .Conv2D('conv4', 64, 3, padding='VALID') .Dropout('drop', rate=0.5) .FullyConnected('fc0', 512, bias_initializer=tf.constant_initializer(0.1), activation=tf.nn.relu) .FullyConnected('linear', units=10)()) tf.nn.softmax(logits, name='output') accuracy = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32) add_moving_summary(tf.reduce_mean(accuracy, name='accuracy')) cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss') wd_cost = regularize_cost('fc.*/W', l2_regularizer(0.00001)) add_moving_summary(cost, wd_cost) add_param_summary(('.*/W', ['histogram', 'rms'])) # monitor W return tf.add_n([cost, wd_cost], name='cost') def optimizer(self): lr = tf.train.exponential_decay( learning_rate=1e-3, global_step=get_global_step_var(), decay_steps=4721 * 60, decay_rate=0.2, staircase=True, name='learning_rate') tf.summary.scalar('lr', lr) return tf.train.AdamOptimizer(lr) def get_data(): d1 = dataset.SVHNDigit('train') d2 = dataset.SVHNDigit('extra') data_train = RandomMixData([d1, d2]) data_test = dataset.SVHNDigit('test', shuffle=False) augmentors = [ imgaug.Resize((40, 40)), imgaug.Brightness(30), imgaug.Contrast((0.5, 1.5)), ] data_train = AugmentImageComponent(data_train, augmentors) data_train = BatchData(data_train, 128) data_train = MultiProcessRunner(data_train, 5, 5) augmentors = [imgaug.Resize((40, 40))] data_test = AugmentImageComponent(data_test, augmentors) data_test = BatchData(data_test, 128, remainder=True) return data_train, data_test if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') parser.add_argument('--load', help='load model') args = parser.parse_args() if args.gpu: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu logger.auto_set_dir() data_train, data_test = get_data() config = TrainConfig( model=Model(), data=QueueInput(data_train), callbacks=[ ModelSaver(), InferenceRunner(data_test, ScalarStats(['cost', 'accuracy'])) ], max_epoch=350, session_init=SmartInit(args.load) ) launch_train_with_config(config, SimpleTrainer())
3,850
33.079646
97
py
SyNet
SyNet-master/tensorpack/examples/basics/mnist-tflayers.py
# File: mnist-tflayers.py import tensorflow as tf from tensorpack import * from tensorpack.dataflow import dataset from tensorpack.tfutils import summary """ MNIST ConvNet example using tf.layers Mostly the same as 'mnist-convnet.py', the only differences are: 1. use tf.layers 2. use tf.layers variable names to summarize weights """ IMAGE_SIZE = 28 # Monkey-patch tf.layers to support argscope. enable_argscope_for_module(tf.layers) class Model(ModelDesc): def inputs(self): """ Define all the inputs (with type, shape, name) that the graph will need. """ return [tf.TensorSpec((None, IMAGE_SIZE, IMAGE_SIZE), tf.float32, 'input'), tf.TensorSpec((None,), tf.int32, 'label')] def build_graph(self, image, label): """This function should build the model which takes the input variables and return cost at the end""" # In tensorflow, inputs to convolution function are assumed to be # NHWC. Add a single channel here. image = tf.expand_dims(image, 3) image = image * 2 - 1 # center the pixels values at zero # The context manager `argscope` sets the default option for all the layers under # this context. Here we use 32 channel convolution with shape 3x3 with argscope([tf.layers.conv2d], padding='same', activation=tf.nn.relu): l = tf.layers.conv2d(image, 32, 3, name='conv0') l = tf.layers.max_pooling2d(l, 2, 2, padding='valid') l = tf.layers.conv2d(l, 32, 3, name='conv1') l = tf.layers.conv2d(l, 32, 3, name='conv2') l = tf.layers.max_pooling2d(l, 2, 2, padding='valid') l = tf.layers.conv2d(l, 32, 3, name='conv3') l = tf.layers.flatten(l) l = tf.layers.dense(l, 512, activation=tf.nn.relu, name='fc0') l = tf.layers.dropout(l, rate=0.5, training=self.training) logits = tf.layers.dense(l, 10, activation=tf.identity, name='fc1') # a vector of length B with loss of each sample cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss correct = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32, name='correct') accuracy = tf.reduce_mean(correct, name='accuracy') # This will monitor training error & accuracy (in a moving average fashion). The value will be automatically # 1. written to tensosrboard # 2. written to stat.json # 3. printed after each epoch train_error = tf.reduce_mean(1 - correct, name='train_error') summary.add_moving_summary(train_error, accuracy) # Use a regex to find parameters to apply weight decay. # Here we apply a weight decay on all W (weight matrix) of all fc layers # If you don't like regex, you can certainly define the cost in any other methods. 
wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/kernel', tf.nn.l2_loss), name='regularize_loss') total_cost = tf.add_n([wd_cost, cost], name='total_cost') summary.add_moving_summary(cost, wd_cost, total_cost) # monitor histogram of all weight (of conv and fc layers) in tensorboard summary.add_param_summary(('.*/kernel', ['histogram', 'rms'])) # the function should return the total cost to be optimized return total_cost def optimizer(self): lr = tf.train.exponential_decay( learning_rate=1e-3, global_step=get_global_step_var(), decay_steps=468 * 10, decay_rate=0.3, staircase=True, name='learning_rate') # This will also put the summary in tensorboard, stat.json and print in terminal # but this time without moving average tf.summary.scalar('lr', lr) return tf.train.AdamOptimizer(lr) def get_data(): train = BatchData(dataset.Mnist('train'), 128) test = BatchData(dataset.Mnist('test'), 256, remainder=True) return train, test if __name__ == '__main__': # automatically setup the directory train_log/mnist-convnet for logging logger.auto_set_dir() dataset_train, dataset_test = get_data() # How many iterations you want in each epoch. # This len(data) is the default value. steps_per_epoch = len(dataset_train) # get the config which contains everything necessary in a training config = TrainConfig( model=Model(), # The input source for training. FeedInput is slow, this is just for demo purpose. # In practice it's best to use QueueInput or others. See tutorials for details. data=FeedInput(dataset_train), callbacks=[ ModelSaver(), # save the model after every epoch InferenceRunner( # run inference(for validation) after every epoch dataset_test, # the DataFlow instance used for validation ScalarStats(['cross_entropy_loss', 'accuracy'])), MaxSaver('validation_accuracy'), # save the model with highest accuracy (prefix 'validation_') ], steps_per_epoch=steps_per_epoch, max_epoch=100, ) launch_train_with_config(config, SimpleTrainer())
5,392
40.806202
116
py
SyNet
SyNet-master/tensorpack/examples/basics/mnist-convnet.py
# File: mnist-convnet.py import tensorflow as tf from tensorpack import * from tensorpack.dataflow import dataset from tensorpack.tfutils import summary """ MNIST ConvNet example. about 0.6% validation error after 30 epochs. """ IMAGE_SIZE = 28 class Model(ModelDesc): # See tutorial at https://tensorpack.readthedocs.io/tutorial/training-interface.html#with-modeldesc-and-trainconfig def inputs(self): """ Define all the inputs (with type, shape, name) that the graph will need. """ return [tf.TensorSpec((None, IMAGE_SIZE, IMAGE_SIZE), tf.float32, 'input'), tf.TensorSpec((None,), tf.int32, 'label')] def build_graph(self, image, label): """This function should build the model which takes the input variables (defined above) and return cost at the end.""" # In tensorflow, inputs to convolution function are assumed to be # NHWC. Add a single channel here. image = tf.expand_dims(image, 3) image = image * 2 - 1 # center the pixels values at zero # The context manager `argscope` sets the default option for all the layers under # this context. Here we use 32 channel convolution with shape 3x3 # See tutorial at https://tensorpack.readthedocs.io/tutorial/symbolic.html with argscope(Conv2D, kernel_size=3, activation=tf.nn.relu, filters=32): # LinearWrap is just a syntax sugar. # See tutorial at https://tensorpack.readthedocs.io/tutorial/symbolic.html logits = (LinearWrap(image) .Conv2D('conv0') .MaxPooling('pool0', 2) .Conv2D('conv1') .Conv2D('conv2') .MaxPooling('pool1', 2) .Conv2D('conv3') .FullyConnected('fc0', 512, activation=tf.nn.relu) .Dropout('dropout', rate=0.5) .FullyConnected('fc1', 10, activation=tf.identity)()) # a vector of length B with loss of each sample cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss correct = tf.cast(tf.nn.in_top_k(predictions=logits, targets=label, k=1), tf.float32, name='correct') accuracy = tf.reduce_mean(correct, name='accuracy') # This will monitor training error & accuracy (in a moving average fashion). The value will be automatically # 1. written to tensosrboard # 2. written to stat.json # 3. printed after each epoch # You can also just call `tf.summary.scalar`. But moving summary has some other benefits. # See tutorial at https://tensorpack.readthedocs.io/tutorial/summary.html train_error = tf.reduce_mean(1 - correct, name='train_error') summary.add_moving_summary(train_error, accuracy) # Use a regex to find parameters to apply weight decay. # Here we apply a weight decay on all W (weight matrix) of all fc layers # If you don't like regex, you can certainly define the cost in any other methods. wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss), name='regularize_loss') total_cost = tf.add_n([wd_cost, cost], name='total_cost') summary.add_moving_summary(cost, wd_cost, total_cost) # monitor histogram of all weight (of conv and fc layers) in tensorboard summary.add_param_summary(('.*/W', ['histogram', 'rms'])) # the function should return the total cost to be optimized return total_cost def optimizer(self): lr = tf.train.exponential_decay( learning_rate=1e-3, global_step=get_global_step_var(), decay_steps=468 * 10, decay_rate=0.3, staircase=True, name='learning_rate') # This will also put the summary in tensorboard, stat.json and print in terminal, # but this time without moving average tf.summary.scalar('lr', lr) return tf.train.AdamOptimizer(lr) def get_data(): # We don't need any fancy data loading for this simple example. 
# See dataflow tutorial at https://tensorpack.readthedocs.io/tutorial/dataflow.html train = BatchData(dataset.Mnist('train'), 128) test = BatchData(dataset.Mnist('test'), 256, remainder=True) train = PrintData(train) return train, test if __name__ == '__main__': # automatically setup the directory train_log/mnist-convnet for logging logger.auto_set_dir() dataset_train, dataset_test = get_data() # How many iterations you want in each epoch. # This len(data) is the default value. steps_per_epoch = len(dataset_train) # get the config which contains everything necessary in a training config = TrainConfig( model=Model(), # The input source for training. FeedInput is slow, this is just for demo purpose. # In practice it's best to use QueueInput or others. # See tutorial at https://tensorpack.readthedocs.io/tutorial/extend/input-source.html data=FeedInput(dataset_train), # We use a few simple callbacks in this demo. # See tutorial at https://tensorpack.readthedocs.io/tutorial/callback.html callbacks=[ ModelSaver(), # save the model after every epoch InferenceRunner( # run inference(for validation) after every epoch dataset_test, # the DataFlow instance used for validation ScalarStats( # produce `val_accuracy` and `val_cross_entropy_loss` ['cross_entropy_loss', 'accuracy'], prefix='val')), # MaxSaver needs to come after InferenceRunner to obtain its score MaxSaver('val_accuracy'), # save the model with highest accuracy ], steps_per_epoch=steps_per_epoch, max_epoch=100, ) # Use a simple trainer in this demo. # More trainers with multi-gpu or distributed functionalities are available. # See tutorial at https://tensorpack.readthedocs.io/tutorial/trainer.html launch_train_with_config(config, SimpleTrainer())
6,339
43.647887
119
py
SyNet
SyNet-master/tensorpack/examples/basics/mnist-visualizations.py
# File: mnist-visualizations.py """ The same MNIST ConvNet example, but with weights/activations visualization. """ import tensorflow as tf from tensorpack import * from tensorpack.dataflow import dataset IMAGE_SIZE = 28 def visualize_conv_weights(filters, name): """Visualize use weights in convolution filters. Args: filters: tensor containing the weights [H,W,Cin,Cout] name: label for tensorboard Returns: image of all weight """ with tf.name_scope('visualize_w_' + name): filters = tf.transpose(filters, (3, 2, 0, 1)) # [h, w, cin, cout] -> [cout, cin, h, w] filters = tf.unstack(filters) # --> cout * [cin, h, w] filters = tf.concat(filters, 1) # --> [cin, cout * h, w] filters = tf.unstack(filters) # --> cin * [cout * h, w] filters = tf.concat(filters, 1) # --> [cout * h, cin * w] filters = tf.expand_dims(filters, 0) filters = tf.expand_dims(filters, -1) tf.summary.image('visualize_w_' + name, filters) def visualize_conv_activations(activation, name): """Visualize activations for convolution layers. Remarks: This tries to place all activations into a square. Args: activation: tensor with the activation [B,H,W,C] name: label for tensorboard Returns: image of almost all activations """ import math with tf.name_scope('visualize_act_' + name): _, h, w, c = activation.get_shape().as_list() rows = [] c_per_row = int(math.sqrt(c)) for y in range(0, c - c_per_row, c_per_row): row = activation[:, :, :, y:y + c_per_row] # [?, H, W, 32] --> [?, H, W, 5] cols = tf.unstack(row, axis=3) # [?, H, W, 5] --> 5 * [?, H, W] row = tf.concat(cols, 1) rows.append(row) viz = tf.concat(rows, 2) tf.summary.image('visualize_act_' + name, tf.expand_dims(viz, -1)) class Model(ModelDesc): def inputs(self): return [tf.TensorSpec((None, IMAGE_SIZE, IMAGE_SIZE), tf.float32, 'input'), tf.TensorSpec((None,), tf.int32, 'label')] def build_graph(self, image, label): image = tf.expand_dims(image * 2 - 1, 3) with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu, out_channel=32): c0 = Conv2D('conv0', image) p0 = MaxPooling('pool0', c0, 2) c1 = Conv2D('conv1', p0) c2 = Conv2D('conv2', c1) p1 = MaxPooling('pool1', c2, 2) c3 = Conv2D('conv3', p1) fc1 = FullyConnected('fc0', c3, 512, nl=tf.nn.relu) fc1 = Dropout('dropout', fc1, 0.5) logits = FullyConnected('fc1', fc1, out_dim=10, nl=tf.identity) with tf.name_scope('visualizations'): visualize_conv_weights(c0.variables.W, 'conv0') visualize_conv_activations(c0, 'conv0') visualize_conv_weights(c1.variables.W, 'conv1') visualize_conv_activations(c1, 'conv1') visualize_conv_weights(c2.variables.W, 'conv2') visualize_conv_activations(c2, 'conv2') visualize_conv_weights(c3.variables.W, 'conv3') visualize_conv_activations(c3, 'conv3') tf.summary.image('input', (image + 1.0) * 128., 3) cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss') tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32), name='accuracy') wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss), name='regularize_loss') return tf.add_n([wd_cost, cost], name='total_cost') def optimizer(self): lr = tf.train.exponential_decay( learning_rate=1e-3, global_step=get_global_step_var(), decay_steps=468 * 10, decay_rate=0.3, staircase=True, name='learning_rate') tf.summary.scalar('lr', lr) return tf.train.AdamOptimizer(lr) def get_data(): train = BatchData(dataset.Mnist('train'), 128) test = BatchData(dataset.Mnist('test'), 256, remainder=True) return train, test if __name__ == '__main__': logger.auto_set_dir() 
dataset_train, dataset_test = get_data() config = TrainConfig( model=Model(), dataflow=dataset_train, callbacks=[ ModelSaver(), InferenceRunner( dataset_test, ScalarStats(['cross_entropy_loss', 'accuracy'])), ], steps_per_epoch=len(dataset_train), max_epoch=100, ) launch_train_with_config(config, SimpleTrainer())
4,841
33.340426
96
py
SyNet
SyNet-master/tensorpack/examples/basics/export-model.py
import argparse import cv2 import tensorflow as tf from tensorpack import * from tensorpack.tfutils.export import ModelExporter """ This example illustrates the process of exporting a model trained in Tensorpack to: - SavedModel format for TensorFlow Serving - A frozen and pruned inference graph (compact) The model applies a laplace filter to the input image. The steps are: 1. train the model by python export-model.py 2. export the model by python export-model.py --export serving --load train_log/export/checkpoint python export-model.py --export compact --load train_log/export/checkpoint 3. run inference by python export-model.py --apply default --load train_log/export/checkpoint python export-model.py --apply inference_graph --load train_log/export/checkpoint python export-model.py --apply compact --load /tmp/compact_graph.pb """ SHAPE = 256 CHANNELS = 3 class Model(ModelDesc): """Just a simple model, which applies the Laplacian-operation to images to showcase the usage of variables, and alternating the inference-graph later. """ def inputs(self): return [tf.TensorSpec((None, SHAPE, SHAPE, CHANNELS), tf.uint8, 'input_img'), tf.TensorSpec((None, SHAPE, SHAPE, CHANNELS), tf.uint8, 'target_img')] def make_prediction(self, img): img = tf.cast(img, tf.float32) img = tf.image.rgb_to_grayscale(img) k = tf.get_variable('filter', dtype=tf.float32, initializer=[[[[0.]], [[1.]], [[0.]]], [ [[1.]], [[-4.]], [[1.]]], [[[0.]], [[1.]], [[0.]]]]) prediction_img = tf.nn.conv2d(img, k, strides=[1, 1, 1, 1], padding='SAME') return prediction_img def build_graph(self, input_img, target_img): target_img = tf.cast(target_img, tf.float32) target_img = tf.image.rgb_to_grayscale(target_img) self.prediction_img = tf.identity(self.make_prediction(input_img), name='prediction_img') cost = tf.losses.mean_squared_error(target_img, self.prediction_img, reduction=tf.losses.Reduction.MEAN) return tf.identity(cost, name='total_costs') def optimizer(self): lr = tf.get_variable('learning_rate', initializer=0.0, trainable=False) return tf.train.AdamOptimizer(lr) def get_data(subset): ds = FakeData([[SHAPE, SHAPE, CHANNELS], [SHAPE, SHAPE, CHANNELS]], 1000, random=False, dtype=['uint8', 'uint8'], domain=[(0, 255), (0, 10)]) ds = BatchData(ds, 1) return ds class InferenceOnlyModel(Model): """Recreate a different inference graph to accept images encoded as png. """ def inputs(self): # The inference graph only accepts a single image, which is different to the training model. return [tf.TensorSpec((None,), tf.string, 'input_img_bytes')] def build_graph(self, input_img_bytes): # prepare input (png encoded strings to images) input_img = tf.map_fn(lambda x: tf.image.decode_png(x, channels=3), input_img_bytes, dtype=tf.uint8) # just copy the relevant parts to this graph. prediction_img = self.make_prediction(input_img) # outputs should be png encoded strings agains prediction_img = tf.clip_by_value(prediction_img, 0, 255) prediction_img = tf.cast(prediction_img, tf.uint8) prediction_img_bytes = tf.map_fn(tf.image.encode_png, prediction_img, dtype=tf.string) tf.identity(prediction_img_bytes, name='prediction_img_bytes') def export_serving(model_path): """Export trained model to use it in TensorFlow Serving or cloudML. 
""" pred_config = PredictConfig( session_init=SmartInit(model_path), model=InferenceOnlyModel(), input_names=['input_img_bytes'], output_names=['prediction_img_bytes']) ModelExporter(pred_config).export_serving('/tmp/exported') def export_compact(model_path): """Export trained model to use it as a frozen and pruned inference graph in mobile applications. """ pred_config = PredictConfig( session_init=SmartInit(model_path), model=Model(), input_names=['input_img'], output_names=['prediction_img']) ModelExporter(pred_config).export_compact('/tmp/compact_graph.pb') def apply(model_path): """Run inference from a training model checkpoint. """ pred_config = PredictConfig( session_init=SmartInit(model_path), model=Model(), input_names=['input_img'], output_names=['prediction_img']) pred = OfflinePredictor(pred_config) img = cv2.imread('lena.png') prediction = pred([img])[0] cv2.imwrite('applied_default.jpg', prediction[0]) def apply_inference_graph(model_path): """Run inference from a different graph, which receives encoded images buffers. """ pred_config = PredictConfig( session_init=SmartInit(model_path), model=InferenceOnlyModel(), input_names=['input_img_bytes'], output_names=['prediction_img_bytes']) pred = OfflinePredictor(pred_config) buf = open('lena.png', 'rb').read() prediction = pred([buf])[0] with open('applied_inference_graph.png', 'wb') as f: f.write(prediction[0]) def apply_compact(graph_path): """Run the pruned and frozen inference graph. """ with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: # Note, we just load the graph and do *not* need to initialize anything. with tf.gfile.GFile(graph_path, "rb") as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) tf.import_graph_def(graph_def) input_img = sess.graph.get_tensor_by_name('import/input_img:0') prediction_img = sess.graph.get_tensor_by_name('import/prediction_img:0') prediction = sess.run(prediction_img, {input_img: cv2.imread('lena.png')[None, ...]}) cv2.imwrite('applied_compact.png', prediction[0]) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--load', help='load model') parser.add_argument('--apply', help='run sampling', default='', choices=['default', 'inference_graph', 'compact']) parser.add_argument('--export', help='export the model', default='', choices=['serving', 'compact']) args = parser.parse_args() if args.apply != '': if args.apply == 'default': apply(args.load) elif args.apply == 'inference_graph': apply_inference_graph(args.load) else: apply_compact(args.load) elif args.export != '': if args.export == 'serving': export_serving(args.load) else: export_compact(args.load) else: logger.auto_set_dir() ds_train = get_data('train') config = TrainConfig( model=Model(), data=QueueInput(ds_train), callbacks=[ ModelSaver(), ], steps_per_epoch=1, max_epoch=1, ) launch_train_with_config(config, SimpleTrainer())
7,254
33.712919
108
py
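A hedged sketch to complement the export-model.py file above: loading the SavedModel written by export_serving() with a plain TF1 session instead of TensorFlow Serving. The '/tmp/exported' directory and the tensor names come from the code above; the use of the default SERVING tag is an assumption about ModelExporter's defaults.

import tensorflow as tf

export_dir = '/tmp/exported'   # directory written by export_serving() above
with tf.Session(graph=tf.Graph()) as sess:
    # assumed: ModelExporter exports under the standard SERVING tag
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], export_dir)
    inp = sess.graph.get_tensor_by_name('input_img_bytes:0')
    out = sess.graph.get_tensor_by_name('prediction_img_bytes:0')

    png_bytes = open('lena.png', 'rb').read()
    result = sess.run(out, {inp: [png_bytes]})        # one png string in, one png string out
    with open('applied_saved_model.png', 'wb') as f:
        f.write(result[0])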
SyNet
SyNet-master/tensorpack/examples/basics/mnist-tfslim.py
# File: mnist-tfslim.py """ MNIST ConvNet example using TensorFlow-slim. Mostly the same as 'mnist-convnet.py', the only differences are: 1. use slim.layers, slim.arg_scope, etc 2. use slim names to summarize weights """ import tensorflow as tf import tensorflow.contrib.slim as slim from tensorpack import * from tensorpack.dataflow import dataset IMAGE_SIZE = 28 class Model(ModelDesc): def inputs(self): return [tf.TensorSpec((None, IMAGE_SIZE, IMAGE_SIZE), tf.float32, 'input'), tf.TensorSpec((None,), tf.int32, 'label')] def build_graph(self, image, label): image = tf.expand_dims(image, 3) image = image * 2 - 1 with slim.arg_scope([slim.layers.fully_connected], weights_regularizer=slim.l2_regularizer(1e-5)): l = slim.layers.conv2d(image, 32, [3, 3], scope='conv0') l = slim.layers.max_pool2d(l, [2, 2], scope='pool0') l = slim.layers.conv2d(l, 32, [3, 3], padding='SAME', scope='conv1') l = slim.layers.conv2d(l, 32, [3, 3], scope='conv2') l = slim.layers.max_pool2d(l, [2, 2], scope='pool1') l = slim.layers.conv2d(l, 32, [3, 3], scope='conv3') l = slim.layers.flatten(l, scope='flatten') l = slim.layers.fully_connected(l, 512, scope='fc0') l = slim.layers.dropout(l, is_training=self.training) logits = slim.layers.fully_connected(l, 10, activation_fn=None, scope='fc1') cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss') acc = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32) acc = tf.reduce_mean(acc, name='accuracy') summary.add_moving_summary(acc) summary.add_moving_summary(cost) summary.add_param_summary(('.*/weights', ['histogram', 'rms'])) # slim uses different variable names return cost + regularize_cost_from_collection() def optimizer(self): lr = tf.train.exponential_decay( learning_rate=1e-3, global_step=get_global_step_var(), decay_steps=468 * 10, decay_rate=0.3, staircase=True, name='learning_rate') tf.summary.scalar('lr', lr) return tf.train.AdamOptimizer(lr) def get_data(): train = BatchData(dataset.Mnist('train'), 128) test = BatchData(dataset.Mnist('test'), 256, remainder=True) return train, test if __name__ == '__main__': logger.auto_set_dir() dataset_train, dataset_test = get_data() config = TrainConfig( model=Model(), dataflow=dataset_train, callbacks=[ ModelSaver(), InferenceRunner( dataset_test, ScalarStats(['cross_entropy_loss', 'accuracy'])), ], max_epoch=100, ) launch_train_with_config(config, SimpleTrainer())
3,003
32.377778
109
py
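A short worked sketch of the learning-rate schedule in mnist-tfslim.py above: with batch size 128 (from get_data()) the MNIST training set of 60,000 images gives roughly 468 steps per epoch, so decay_steps=468*10 decays the rate by 0.3 about every 10 epochs. The 60,000-image figure is the standard MNIST size, not stated in the file itself.

steps_per_epoch = 60000 // 128                  # ~468 steps per epoch
initial_lr, decay_rate, decay_steps = 1e-3, 0.3, 468 * 10
for epoch in (0, 10, 20, 30):
    global_step = epoch * steps_per_epoch
    lr = initial_lr * decay_rate ** (global_step // decay_steps)   # staircase=True -> integer division
    print(epoch, lr)                             # 0.001, 0.0003, 9e-05, 2.7e-05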
SyNet
SyNet-master/tensorpack/examples/FasterRCNN/data.py
# File: data.py import copy import itertools import numpy as np import cv2 from tabulate import tabulate from termcolor import colored from tensorpack.dataflow import ( DataFromList, MapData, MapDataComponent, MultiProcessMapData, MultiThreadMapData, TestDataSpeed, imgaug, ) from tensorpack.utils import logger from tensorpack.utils.argtools import log_once from modeling.model_rpn import get_all_anchors from modeling.model_fpn import get_all_anchors_fpn from common import ( CustomResize, DataFromListOfDict, box_to_point4, filter_boxes_inside_shape, np_iou, point4_to_box, polygons_to_mask, ) from config import config as cfg from dataset import DatasetRegistry, register_coco from utils.np_box_ops import area as np_area from utils.np_box_ops import ioa as np_ioa # import tensorpack.utils.viz as tpviz class MalformedData(BaseException): pass def print_class_histogram(roidbs): """ Args: roidbs (list[dict]): the same format as the output of `training_roidbs`. """ class_names = DatasetRegistry.get_metadata(cfg.DATA.TRAIN[0], 'class_names') # labels are in [1, NUM_CATEGORY], hence +2 for bins hist_bins = np.arange(cfg.DATA.NUM_CATEGORY + 2) # Histogram of ground-truth objects gt_hist = np.zeros((cfg.DATA.NUM_CATEGORY + 1,), dtype=np.int) for entry in roidbs: # filter crowd? gt_inds = np.where((entry["class"] > 0) & (entry["is_crowd"] == 0))[0] gt_classes = entry["class"][gt_inds] if len(gt_classes): assert gt_classes.max() <= len(class_names) - 1 gt_hist += np.histogram(gt_classes, bins=hist_bins)[0] data = list(itertools.chain(*[[class_names[i + 1], v] for i, v in enumerate(gt_hist[1:])])) COL = min(6, len(data)) total_instances = sum(data[1::2]) data.extend([None] * ((COL - len(data) % COL) % COL)) data.extend(["total", total_instances]) data = itertools.zip_longest(*[data[i::COL] for i in range(COL)]) # the first line is BG table = tabulate(data, headers=["class", "#box"] * (COL // 2), tablefmt="pipe", stralign="center", numalign="left") logger.info("Ground-Truth category distribution:\n" + colored(table, "cyan")) class TrainingDataPreprocessor: """ The mapper to preprocess the input data for training. Since the mapping may run in other processes, we write a new class and explicitly pass cfg to it, in the spirit of "explicitly pass resources to subprocess". """ def __init__(self, cfg): self.cfg = cfg self.aug = imgaug.AugmentorList([ CustomResize(cfg.PREPROC.TRAIN_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE), imgaug.Flip(horiz=True) ]) def __call__(self, roidb): fname, boxes, klass, is_crowd = roidb["file_name"], roidb["boxes"], roidb["class"], roidb["is_crowd"] assert boxes.ndim == 2 and boxes.shape[1] == 4, boxes.shape boxes = np.copy(boxes) im = cv2.imread(fname, cv2.IMREAD_COLOR) assert im is not None, fname im = im.astype("float32") height, width = im.shape[:2] # assume floatbox as input assert boxes.dtype == np.float32, "Loader has to return float32 boxes!" if not self.cfg.DATA.ABSOLUTE_COORD: boxes[:, 0::2] *= width boxes[:, 1::2] *= height # augmentation: tfms = self.aug.get_transform(im) im = tfms.apply_image(im) points = box_to_point4(boxes) points = tfms.apply_coords(points) boxes = point4_to_box(points) if len(boxes): assert klass.max() <= self.cfg.DATA.NUM_CATEGORY, \ "Invalid category {}!".format(klass.max()) assert np.min(np_area(boxes)) > 0, "Some boxes have zero area!" 
ret = {"image": im} # Add rpn data to dataflow: try: if self.cfg.MODE_FPN: multilevel_anchor_inputs = self.get_multilevel_rpn_anchor_input(im, boxes, is_crowd) for i, (anchor_labels, anchor_boxes) in enumerate(multilevel_anchor_inputs): ret["anchor_labels_lvl{}".format(i + 2)] = anchor_labels ret["anchor_boxes_lvl{}".format(i + 2)] = anchor_boxes else: ret["anchor_labels"], ret["anchor_boxes"] = self.get_rpn_anchor_input(im, boxes, is_crowd) boxes = boxes[is_crowd == 0] # skip crowd boxes in training target klass = klass[is_crowd == 0] ret["gt_boxes"] = boxes ret["gt_labels"] = klass except MalformedData as e: log_once("Input {} is filtered for training: {}".format(fname, str(e)), "warn") return None if self.cfg.MODE_MASK: # augmentation will modify the polys in-place segmentation = copy.deepcopy(roidb["segmentation"]) segmentation = [segmentation[k] for k in range(len(segmentation)) if not is_crowd[k]] assert len(segmentation) == len(boxes) # Apply augmentation on polygon coordinates. # And produce one image-sized binary mask per box. masks = [] width_height = np.asarray([width, height], dtype=np.float32) gt_mask_width = int(np.ceil(im.shape[1] / 8.0) * 8) # pad to 8 in order to pack mask into bits for polys in segmentation: if not self.cfg.DATA.ABSOLUTE_COORD: polys = [p * width_height for p in polys] polys = [tfms.apply_coords(p) for p in polys] masks.append(polygons_to_mask(polys, im.shape[0], gt_mask_width)) if len(masks): masks = np.asarray(masks, dtype='uint8') # values in {0, 1} masks = np.packbits(masks, axis=-1) else: # no gt on the image masks = np.zeros((0, im.shape[0], gt_mask_width // 8), dtype='uint8') ret['gt_masks_packed'] = masks # from viz import draw_annotation, draw_mask # viz = draw_annotation(im, boxes, klass) # for mask in masks: # viz = draw_mask(viz, mask) # tpviz.interactive_imshow(viz) return ret def get_rpn_anchor_input(self, im, boxes, is_crowd): """ Args: im: an image boxes: nx4, floatbox, gt. shoudn't be changed is_crowd: n, Returns: The anchor labels and target boxes for each pixel in the featuremap. fm_labels: fHxfWxNA fm_boxes: fHxfWxNAx4 NA will be NUM_ANCHOR_SIZES x NUM_ANCHOR_RATIOS """ boxes = boxes.copy() all_anchors = np.copy( get_all_anchors( stride=self.cfg.RPN.ANCHOR_STRIDE, sizes=self.cfg.RPN.ANCHOR_SIZES, ratios=self.cfg.RPN.ANCHOR_RATIOS, max_size=self.cfg.PREPROC.MAX_SIZE, ) ) # fHxfWxAx4 -> (-1, 4) featuremap_anchors_flatten = all_anchors.reshape((-1, 4)) # only use anchors inside the image inside_ind, inside_anchors = filter_boxes_inside_shape(featuremap_anchors_flatten, im.shape[:2]) # obtain anchor labels and their corresponding gt boxes anchor_labels, anchor_gt_boxes = self.get_anchor_labels( inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1] ) # Fill them back to original size: fHxfWx1, fHxfWx4 num_anchor = self.cfg.RPN.NUM_ANCHOR anchorH, anchorW = all_anchors.shape[:2] featuremap_labels = -np.ones((anchorH * anchorW * num_anchor,), dtype="int32") featuremap_labels[inside_ind] = anchor_labels featuremap_labels = featuremap_labels.reshape((anchorH, anchorW, num_anchor)) featuremap_boxes = np.zeros((anchorH * anchorW * num_anchor, 4), dtype="float32") featuremap_boxes[inside_ind, :] = anchor_gt_boxes featuremap_boxes = featuremap_boxes.reshape((anchorH, anchorW, num_anchor, 4)) return featuremap_labels, featuremap_boxes # TODO: can probably merge single-level logic with FPN logic to simplify code def get_multilevel_rpn_anchor_input(self, im, boxes, is_crowd): """ Args: im: an image boxes: nx4, floatbox, gt. 
shoudn't be changed is_crowd: n, Returns: [(fm_labels, fm_boxes)]: Returns a tuple for each FPN level. Each tuple contains the anchor labels and target boxes for each pixel in the featuremap. fm_labels: fHxfWx NUM_ANCHOR_RATIOS fm_boxes: fHxfWx NUM_ANCHOR_RATIOS x4 """ boxes = boxes.copy() anchors_per_level = get_all_anchors_fpn( strides=self.cfg.FPN.ANCHOR_STRIDES, sizes=self.cfg.RPN.ANCHOR_SIZES, ratios=self.cfg.RPN.ANCHOR_RATIOS, max_size=self.cfg.PREPROC.MAX_SIZE, ) flatten_anchors_per_level = [k.reshape((-1, 4)) for k in anchors_per_level] all_anchors_flatten = np.concatenate(flatten_anchors_per_level, axis=0) inside_ind, inside_anchors = filter_boxes_inside_shape(all_anchors_flatten, im.shape[:2]) anchor_labels, anchor_gt_boxes = self.get_anchor_labels( inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1] ) # map back to all_anchors, then split to each level num_all_anchors = all_anchors_flatten.shape[0] all_labels = -np.ones((num_all_anchors,), dtype="int32") all_labels[inside_ind] = anchor_labels all_boxes = np.zeros((num_all_anchors, 4), dtype="float32") all_boxes[inside_ind] = anchor_gt_boxes start = 0 multilevel_inputs = [] for level_anchor in anchors_per_level: assert level_anchor.shape[2] == len(self.cfg.RPN.ANCHOR_RATIOS) anchor_shape = level_anchor.shape[:3] # fHxfWxNUM_ANCHOR_RATIOS num_anchor_this_level = np.prod(anchor_shape) end = start + num_anchor_this_level multilevel_inputs.append( (all_labels[start:end].reshape(anchor_shape), all_boxes[start:end, :].reshape(anchor_shape + (4,))) ) start = end assert end == num_all_anchors, "{} != {}".format(end, num_all_anchors) return multilevel_inputs def get_anchor_labels(self, anchors, gt_boxes, crowd_boxes): """ Label each anchor as fg/bg/ignore. Args: anchors: Ax4 float gt_boxes: Bx4 float, non-crowd crowd_boxes: Cx4 float Returns: anchor_labels: (A,) int. Each element is {-1, 0, 1} anchor_boxes: Ax4. Contains the target gt_box for each anchor when the anchor is fg. """ # This function will modify labels and return the filtered inds def filter_box_label(labels, value, max_num): curr_inds = np.where(labels == value)[0] if len(curr_inds) > max_num: disable_inds = np.random.choice(curr_inds, size=(len(curr_inds) - max_num), replace=False) labels[disable_inds] = -1 # ignore them curr_inds = np.where(labels == value)[0] return curr_inds NA, NB = len(anchors), len(gt_boxes) if NB == 0: # No groundtruth. All anchors are either background or ignored. 
anchor_labels = np.zeros((NA,), dtype="int32") filter_box_label(anchor_labels, 0, self.cfg.RPN.BATCH_PER_IM) return anchor_labels, np.zeros((NA, 4), dtype="float32") box_ious = np_iou(anchors, gt_boxes) # NA x NB ious_argmax_per_anchor = box_ious.argmax(axis=1) # NA, ious_max_per_anchor = box_ious.max(axis=1) ious_max_per_gt = np.amax(box_ious, axis=0, keepdims=True) # 1xNB # for each gt, find all those anchors (including ties) that has the max ious with it anchors_with_max_iou_per_gt = np.where(box_ious == ious_max_per_gt)[0] # Setting NA labels: 1--fg 0--bg -1--ignore anchor_labels = -np.ones((NA,), dtype="int32") # NA, # the order of setting neg/pos labels matter anchor_labels[anchors_with_max_iou_per_gt] = 1 anchor_labels[ious_max_per_anchor >= self.cfg.RPN.POSITIVE_ANCHOR_THRESH] = 1 anchor_labels[ious_max_per_anchor < self.cfg.RPN.NEGATIVE_ANCHOR_THRESH] = 0 # label all non-ignore candidate boxes which overlap crowd as ignore if crowd_boxes.size > 0: cand_inds = np.where(anchor_labels >= 0)[0] cand_anchors = anchors[cand_inds] ioas = np_ioa(crowd_boxes, cand_anchors) overlap_with_crowd = cand_inds[ioas.max(axis=0) > self.cfg.RPN.CROWD_OVERLAP_THRESH] anchor_labels[overlap_with_crowd] = -1 # Subsample fg labels: ignore some fg if fg is too many target_num_fg = int(self.cfg.RPN.BATCH_PER_IM * self.cfg.RPN.FG_RATIO) fg_inds = filter_box_label(anchor_labels, 1, target_num_fg) # Keep an image even if there is no foreground anchors # if len(fg_inds) == 0: # raise MalformedData("No valid foreground for RPN!") # Subsample bg labels. num_bg is not allowed to be too many old_num_bg = np.sum(anchor_labels == 0) if old_num_bg == 0: # No valid bg in this image, skip. raise MalformedData("No valid background for RPN!") target_num_bg = self.cfg.RPN.BATCH_PER_IM - len(fg_inds) filter_box_label(anchor_labels, 0, target_num_bg) # ignore return values # Set anchor boxes: the best gt_box for each fg anchor anchor_boxes = np.zeros((NA, 4), dtype="float32") fg_boxes = gt_boxes[ious_argmax_per_anchor[fg_inds], :] anchor_boxes[fg_inds, :] = fg_boxes # assert len(fg_inds) + np.sum(anchor_labels == 0) == self.cfg.RPN.BATCH_PER_IM return anchor_labels, anchor_boxes def get_train_dataflow(): """ Return a training dataflow. Each datapoint consists of the following: An image: (h, w, 3), 1 or more pairs of (anchor_labels, anchor_boxes): anchor_labels: (h', w', NA) anchor_boxes: (h', w', NA, 4) gt_boxes: (N, 4) gt_labels: (N,) If MODE_MASK, gt_masks: (N, h, w) """ roidbs = list(itertools.chain.from_iterable(DatasetRegistry.get(x).training_roidbs() for x in cfg.DATA.TRAIN)) print_class_histogram(roidbs) # Filter out images that have no gt boxes, but this filter shall not be applied for testing. # The model does support training with empty images, but it is not useful for COCO. num = len(roidbs) if cfg.DATA.FILTER_EMPTY_ANNOTATIONS: roidbs = list(filter(lambda img: len(img["boxes"][img["is_crowd"] == 0]) > 0, roidbs)) logger.info( "Filtered {} images which contain no non-crowd groudtruth boxes. 
Total #images for training: {}".format( num - len(roidbs), len(roidbs) ) ) ds = DataFromList(roidbs, shuffle=True) preprocess = TrainingDataPreprocessor(cfg) if cfg.DATA.NUM_WORKERS > 0: if cfg.TRAINER == "horovod": buffer_size = cfg.DATA.NUM_WORKERS * 10 # one dataflow for each process, therefore don't need large buffer ds = MultiThreadMapData(ds, cfg.DATA.NUM_WORKERS, preprocess, buffer_size=buffer_size) # MPI does not like fork() else: buffer_size = cfg.DATA.NUM_WORKERS * 20 ds = MultiProcessMapData(ds, cfg.DATA.NUM_WORKERS, preprocess, buffer_size=buffer_size) else: ds = MapData(ds, preprocess) return ds def get_eval_dataflow(name, shard=0, num_shards=1): """ Args: name (str): name of the dataset to evaluate shard, num_shards: to get subset of evaluation data """ roidbs = DatasetRegistry.get(name).inference_roidbs() logger.info("Found {} images for inference.".format(len(roidbs))) num_imgs = len(roidbs) img_per_shard = num_imgs // num_shards img_range = (shard * img_per_shard, (shard + 1) * img_per_shard if shard + 1 < num_shards else num_imgs) # no filter for training ds = DataFromListOfDict(roidbs[img_range[0]: img_range[1]], ["file_name", "image_id"]) def f(fname): im = cv2.imread(fname, cv2.IMREAD_COLOR) assert im is not None, fname return im ds = MapDataComponent(ds, f, 0) # Evaluation itself may be multi-threaded, therefore don't add prefetch here. return ds if __name__ == "__main__": import os from tensorpack.dataflow import PrintData from config import finalize_configs register_coco(os.path.expanduser("~/data/coco")) finalize_configs() ds = get_train_dataflow() ds = PrintData(ds, 10) TestDataSpeed(ds, 50000).start() for k in ds: pass
16,896
40.111922
119
py
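A toy sketch (not from data.py above) of the IoU-threshold matching that get_anchor_labels() performs, using the same np_iou helper from common.py. The 0.7 / 0.3 cutoffs are the typical defaults of cfg.RPN.POSITIVE_ANCHOR_THRESH and NEGATIVE_ANCHOR_THRESH and are an assumption here; the real function additionally force-labels the best anchor per ground-truth box as foreground and subsamples fg/bg to the per-image batch size, which this sketch omits.

import numpy as np
from common import np_iou

anchors = np.array([[0, 0, 8, 8], [0, 0, 6, 6], [20, 20, 30, 30]], dtype=np.float32)
gt_boxes = np.array([[0, 0, 8, 8]], dtype=np.float32)

ious = np_iou(anchors, gt_boxes)                   # (num_anchors, num_gt)
labels = -np.ones(len(anchors), dtype=np.int32)    # -1: ignore
labels[ious.max(axis=1) >= 0.7] = 1                # foreground
labels[ious.max(axis=1) < 0.3] = 0                 # background
print(ious.ravel())                                # [1.     0.5625 0.    ]
print(labels)                                      # [ 1 -1  0]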
SyNet
SyNet-master/tensorpack/examples/FasterRCNN/common.py
# File: common.py import numpy as np import cv2 from tensorpack.dataflow import RNGDataFlow from tensorpack.dataflow.imgaug import ImageAugmentor, ResizeTransform class DataFromListOfDict(RNGDataFlow): def __init__(self, lst, keys, shuffle=False): self._lst = lst self._keys = keys self._shuffle = shuffle self._size = len(lst) def __len__(self): return self._size def __iter__(self): if self._shuffle: self.rng.shuffle(self._lst) for dic in self._lst: dp = [dic[k] for k in self._keys] yield dp class CustomResize(ImageAugmentor): """ Try resizing the shortest edge to a certain number while avoiding the longest edge to exceed max_size. """ def __init__(self, short_edge_length, max_size, interp=cv2.INTER_LINEAR): """ Args: short_edge_length ([int, int]): a [min, max] interval from which to sample the shortest edge length. max_size (int): maximum allowed longest edge length. """ super(CustomResize, self).__init__() if isinstance(short_edge_length, int): short_edge_length = (short_edge_length, short_edge_length) self._init(locals()) def get_transform(self, img): h, w = img.shape[:2] size = self.rng.randint( self.short_edge_length[0], self.short_edge_length[1] + 1) scale = size * 1.0 / min(h, w) if h < w: newh, neww = size, scale * w else: newh, neww = scale * h, size if max(newh, neww) > self.max_size: scale = self.max_size * 1.0 / max(newh, neww) newh = newh * scale neww = neww * scale neww = int(neww + 0.5) newh = int(newh + 0.5) return ResizeTransform(h, w, newh, neww, self.interp) def box_to_point4(boxes): """ Convert boxes to its corner points. Args: boxes: nx4 Returns: (nx4)x2 """ b = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]] b = b.reshape((-1, 2)) return b def point4_to_box(points): """ Args: points: (nx4)x2 Returns: nx4 boxes (x1y1x2y2) """ p = points.reshape((-1, 4, 2)) minxy = p.min(axis=1) # nx2 maxxy = p.max(axis=1) # nx2 return np.concatenate((minxy, maxxy), axis=1) def polygons_to_mask(polys, height, width): """ Convert polygons to binary masks. Args: polys: a list of nx2 float array. Each array contains many (x, y) coordinates. Returns: a binary matrix of (height, width) """ polys = [p.flatten().tolist() for p in polys] assert len(polys) > 0, "Polygons are empty!" import pycocotools.mask as cocomask rles = cocomask.frPyObjects(polys, height, width) rle = cocomask.merge(rles) return cocomask.decode(rle) def clip_boxes(boxes, shape): """ Args: boxes: (...)x4, float shape: h, w """ orig_shape = boxes.shape boxes = boxes.reshape([-1, 4]) h, w = shape boxes[:, [0, 1]] = np.maximum(boxes[:, [0, 1]], 0) boxes[:, 2] = np.minimum(boxes[:, 2], w) boxes[:, 3] = np.minimum(boxes[:, 3], h) return boxes.reshape(orig_shape) def filter_boxes_inside_shape(boxes, shape): """ Args: boxes: (nx4), float shape: (h, w) Returns: indices: (k, ) selection: (kx4) """ assert boxes.ndim == 2, boxes.shape assert len(shape) == 2, shape h, w = shape indices = np.where( (boxes[:, 0] >= 0) & (boxes[:, 1] >= 0) & (boxes[:, 2] <= w) & (boxes[:, 3] <= h))[0] return indices, boxes[indices, :] try: import pycocotools.mask as cocomask # Much faster than utils/np_box_ops def np_iou(A, B): def to_xywh(box): box = box.copy() box[:, 2] -= box[:, 0] box[:, 3] -= box[:, 1] return box ret = cocomask.iou( to_xywh(A), to_xywh(B), np.zeros((len(B),), dtype=np.bool)) # can accelerate even more, if using float32 return ret.astype('float32') except ImportError: from utils.np_box_ops import iou as np_iou # noqa
4,285
24.664671
90
py
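A small sketch (not part of common.py above) showing how box_to_point4 / point4_to_box are used around an augmentation, mirroring the pattern in TrainingDataPreprocessor: transform the four corner points, then take the axis-aligned box of the result. A fixed short-edge range makes the output deterministic; it assumes the augmentor's RNG is initialized at construction, as in this tensorpack version.

import numpy as np
from common import CustomResize, box_to_point4, point4_to_box

img = np.zeros((100, 200, 3), dtype=np.uint8)                 # dummy 100x200 image
boxes = np.array([[10., 20., 50., 80.]], dtype=np.float32)    # x1, y1, x2, y2

aug = CustomResize((60, 60), max_size=100)   # short edge 60, then capped by max_size
tfm = aug.get_transform(img)

points = box_to_point4(boxes)                # (n*4, 2) corner points
points = tfm.apply_coords(points)            # transform the corners, not the box
resized_boxes = point4_to_box(points)        # axis-aligned box of the transformed corners
print(resized_boxes)                         # [[ 5. 10. 25. 40.]]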
SyNet
SyNet-master/tensorpack/examples/FasterRCNN/eval.py
# File: eval.py import itertools import json import numpy as np import os import sys import tensorflow as tf from collections import namedtuple from concurrent.futures import ThreadPoolExecutor from contextlib import ExitStack import cv2 import pycocotools.mask as cocomask import tqdm from scipy import interpolate from tensorpack.callbacks import Callback from tensorpack.tfutils.common import get_tf_version_tuple from tensorpack.utils import logger, get_tqdm from common import CustomResize, clip_boxes from config import config as cfg from data import get_eval_dataflow from dataset import DatasetRegistry try: import horovod.tensorflow as hvd except ImportError: pass DetectionResult = namedtuple( 'DetectionResult', ['box', 'score', 'class_id', 'mask']) """ box: 4 float score: float class_id: int, 1~NUM_CLASS mask: None, or a binary image of the original image shape """ def _scale_box(box, scale): w_half = (box[2] - box[0]) * 0.5 h_half = (box[3] - box[1]) * 0.5 x_c = (box[2] + box[0]) * 0.5 y_c = (box[3] + box[1]) * 0.5 w_half *= scale h_half *= scale scaled_box = np.zeros_like(box) scaled_box[0] = x_c - w_half scaled_box[2] = x_c + w_half scaled_box[1] = y_c - h_half scaled_box[3] = y_c + h_half return scaled_box def _paste_mask(box, mask, shape): """ Args: box: 4 float mask: MxM floats shape: h,w Returns: A uint8 binary image of hxw. """ assert mask.shape[0] == mask.shape[1], mask.shape if cfg.MRCNN.ACCURATE_PASTE: # This method is accurate but much slower. mask = np.pad(mask, [(1, 1), (1, 1)], mode='constant') box = _scale_box(box, float(mask.shape[0]) / (mask.shape[0] - 2)) mask_pixels = np.arange(0.0, mask.shape[0]) + 0.5 mask_continuous = interpolate.interp2d(mask_pixels, mask_pixels, mask, fill_value=0.0) h, w = shape ys = np.arange(0.0, h) + 0.5 xs = np.arange(0.0, w) + 0.5 ys = (ys - box[1]) / (box[3] - box[1]) * mask.shape[0] xs = (xs - box[0]) / (box[2] - box[0]) * mask.shape[1] # Waste a lot of compute since most indices are out-of-border res = mask_continuous(xs, ys) return (res >= 0.5).astype('uint8') else: # This method (inspired by Detectron) is less accurate but fast. # int() is floor # box fpcoor=0.0 -> intcoor=0.0 x0, y0 = list(map(int, box[:2] + 0.5)) # box fpcoor=h -> intcoor=h-1, inclusive x1, y1 = list(map(int, box[2:] - 0.5)) # inclusive x1 = max(x0, x1) # require at least 1x1 y1 = max(y0, y1) w = x1 + 1 - x0 h = y1 + 1 - y0 # rounding errors could happen here, because masks were not originally computed for this shape. # but it's hard to do better, because the network does not know the "original" scale mask = (cv2.resize(mask, (w, h)) > 0.5).astype('uint8') ret = np.zeros(shape, dtype='uint8') ret[y0:y1 + 1, x0:x1 + 1] = mask return ret def predict_image(img, model_func): """ Run detection on one image, using the TF callable. This function should handle the preprocessing internally. Args: img: an image model_func: a callable from the TF model. It takes image and returns (boxes, probs, labels, [masks]) Returns: [DetectionResult] """ orig_shape = img.shape[:2] resizer = CustomResize(cfg.PREPROC.TEST_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE) resized_img = resizer.augment(img) scale = np.sqrt(resized_img.shape[0] * 1.0 / img.shape[0] * resized_img.shape[1] / img.shape[1]) boxes, probs, labels, *masks = model_func(resized_img) # Some slow numpy postprocessing: boxes = boxes / scale # boxes are already clipped inside the graph, but after the floating point scaling, this may not be true any more. 
boxes = clip_boxes(boxes, orig_shape) if masks: full_masks = [_paste_mask(box, mask, orig_shape) for box, mask in zip(boxes, masks[0])] masks = full_masks else: # fill with none masks = [None] * len(boxes) results = [DetectionResult(*args) for args in zip(boxes, probs, labels.tolist(), masks)] return results def predict_dataflow(df, model_func, tqdm_bar=None): """ Args: df: a DataFlow which produces (image, image_id) model_func: a callable from the TF model. It takes image and returns (boxes, probs, labels, [masks]) tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None, will create a new one. Returns: list of dict, in the format used by `DatasetSplit.eval_inference_results` """ df.reset_state() all_results = [] with ExitStack() as stack: if tqdm_bar is None: tqdm_bar = stack.enter_context(get_tqdm(total=df.size())) for img, img_id in df: results = predict_image(img, model_func) for r in results: # int()/float() to make it json-serializable res = { 'image_id': img_id, 'category_id': int(r.class_id), 'bbox': [round(float(x), 4) for x in r.box], 'score': round(float(r.score), 4), } # also append segmentation to results if r.mask is not None: rle = cocomask.encode( np.array(r.mask[:, :, None], order='F'))[0] rle['counts'] = rle['counts'].decode('ascii') res['segmentation'] = rle all_results.append(res) tqdm_bar.update(1) return all_results def multithread_predict_dataflow(dataflows, model_funcs): """ Running multiple `predict_dataflow` in multiple threads, and aggregate the results. Args: dataflows: a list of DataFlow to be used in :func:`predict_dataflow` model_funcs: a list of callable to be used in :func:`predict_dataflow` Returns: list of dict, in the format used by `DatasetSplit.eval_inference_results` """ num_worker = len(model_funcs) assert len(dataflows) == num_worker if num_worker == 1: return predict_dataflow(dataflows[0], model_funcs[0]) kwargs = {'thread_name_prefix': 'EvalWorker'} if sys.version_info.minor >= 6 else {} with ThreadPoolExecutor(max_workers=num_worker, **kwargs) as executor, \ tqdm.tqdm(total=sum([df.size() for df in dataflows])) as pbar: futures = [] for dataflow, pred in zip(dataflows, model_funcs): futures.append(executor.submit(predict_dataflow, dataflow, pred, pbar)) all_results = list(itertools.chain(*[fut.result() for fut in futures])) return all_results class EvalCallback(Callback): """ A callback that runs evaluation once a while. It supports multi-gpu evaluation. """ _chief_only = False def __init__(self, eval_dataset, in_names, out_names, output_dir): self._eval_dataset = eval_dataset self._in_names, self._out_names = in_names, out_names self._output_dir = output_dir def _setup_graph(self): num_gpu = cfg.TRAIN.NUM_GPUS if cfg.TRAINER == 'replicated': buggy_tf = get_tf_version_tuple() in [(1, 11), (1, 12)] # Use two predictor threads per GPU to get better throughput self.num_predictor = num_gpu if buggy_tf else num_gpu * 2 self.predictors = [self._build_predictor(k % num_gpu) for k in range(self.num_predictor)] self.dataflows = [get_eval_dataflow(self._eval_dataset, shard=k, num_shards=self.num_predictor) for k in range(self.num_predictor)] else: # Only eval on the first machine, # Because evaluation assumes that all horovod workers share the filesystem. 
# Alternatively, can eval on all ranks and use allgather, but allgather sometimes hangs self._horovod_run_eval = hvd.rank() == hvd.local_rank() if self._horovod_run_eval: self.predictor = self._build_predictor(0) self.dataflow = get_eval_dataflow(self._eval_dataset, shard=hvd.local_rank(), num_shards=hvd.local_size()) self.barrier = hvd.allreduce(tf.random_normal(shape=[1])) def _build_predictor(self, idx): return self.trainer.get_predictor(self._in_names, self._out_names, device=idx) def _before_train(self): eval_period = cfg.TRAIN.EVAL_PERIOD self.epochs_to_eval = set() for k in itertools.count(1): if k * eval_period > self.trainer.max_epoch: break self.epochs_to_eval.add(k * eval_period) self.epochs_to_eval.add(self.trainer.max_epoch) logger.info("[EvalCallback] Will evaluate every {} epochs".format(eval_period)) def _eval(self): logdir = self._output_dir if cfg.TRAINER == 'replicated': all_results = multithread_predict_dataflow(self.dataflows, self.predictors) else: filenames = [os.path.join( logdir, 'outputs{}-part{}.json'.format(self.global_step, rank) ) for rank in range(hvd.local_size())] if self._horovod_run_eval: local_results = predict_dataflow(self.dataflow, self.predictor) fname = filenames[hvd.local_rank()] with open(fname, 'w') as f: json.dump(local_results, f) self.barrier.eval() if hvd.rank() > 0: return all_results = [] for fname in filenames: with open(fname, 'r') as f: obj = json.load(f) all_results.extend(obj) os.unlink(fname) scores = DatasetRegistry.get(self._eval_dataset).eval_inference_results(all_results) for k, v in scores.items(): self.trainer.monitors.put_scalar(self._eval_dataset + '-' + k, v) def _trigger_epoch(self): if self.epoch_num in self.epochs_to_eval: logger.info("Running evaluation ...") self._eval()
10,667
35.409556
118
py
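A standalone sketch (not from eval.py above) of the mask-to-COCO-RLE conversion used in predict_dataflow(), applied to a tiny synthetic binary mask; it only assumes pycocotools is installed, as eval.py already requires.

import numpy as np
import pycocotools.mask as cocomask

mask = np.zeros((8, 8), dtype=np.uint8)
mask[2:5, 3:7] = 1                                     # a 3x4 foreground blob

rle = cocomask.encode(np.array(mask[:, :, None], order='F'))[0]
print(cocomask.area(rle))                              # 12 foreground pixels
rle['counts'] = rle['counts'].decode('ascii')          # JSON-serializable, as in predict_dataflow()
print(rle)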