repo (stringlengths 2-99) | file (stringlengths 14-239) | code (stringlengths 20-3.99M) | file_length (int64 20-3.99M) | avg_line_length (float64 9.73-128) | max_line_length (int64 11-86.4k) | extension_type (stringclasses 1)
---|---|---|---|---|---|---
ZINBAE | ZINBAE-master/ZINBAE.py | """
Implementation of ZINBAE model
"""
from time import time
import numpy as np
from keras.models import Model
import keras.backend as K
from keras.engine.topology import Layer, InputSpec
from keras.layers import Dense, Input, GaussianNoise, Layer, Activation, Lambda, Multiply, BatchNormalization, Reshape, Concatenate, RepeatVector, Permute
from keras.models import Model
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils.vis_utils import plot_model
from keras.callbacks import EarlyStopping
from sklearn.cluster import KMeans
from sklearn import metrics
import h5py
import scanpy.api as sc
from layers import ConstantDispersionLayer, SliceLayer, ColWiseMultLayer
from loss import poisson_loss, NB, ZINB, mse_loss_v2
from preprocess import read_dataset, normalize
import tensorflow as tf
from numpy.random import seed
seed(2211)
from tensorflow import set_random_seed
set_random_seed(2211)
MeanAct = lambda x: tf.clip_by_value(K.exp(x), 1e-5, 1e6)
DispAct = lambda x: tf.clip_by_value(tf.nn.softplus(x), 1e-4, 1e4)
def mean_MSE(x_impute, x_real):
return np.mean(np.square(np.log(x_impute+1)-np.log(x_real+1)))
def imputate_error(x_impute, x_real, x_raw):
x_impute_log = np.log(x_impute[(x_raw-x_real)<0]+1)
x_real_log = np.log(x_real[(x_raw-x_real)<0]+1)
return np.sum(np.abs(x_impute_log-x_real_log))/np.sum(x_real_log>0)
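# Added worked example (not part of the original file): the error is evaluated
# only at entries where the raw matrix is smaller than the true one, i.e. where
# counts were dropped out. With the toy arrays (an assumption for this sketch)
#   x_real = np.array([4., 0., 7.]); x_raw = np.array([4., 0., 0.])
#   x_impute = np.array([4., 0., 6.])
# only the last entry satisfies (x_raw - x_real) < 0, so the result is
#   |log(6+1) - log(7+1)| / 1
# because exactly one selected entry has x_real_log > 0.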
def autoencoder(dims, noise_sd=0, init='glorot_uniform', act='relu'):
"""
Fully connected auto-encoder model, symmetric.
Arguments:
dims: list of number of units in each layer of encoder. dims[0] is input dim, dims[-1] is units in hidden layer.
The decoder is symmetric with encoder. So number of layers of the auto-encoder is 2*len(dims)-1
act: activation applied to the internal encoder/decoder layers and the bottleneck; not applied to the input or output layers
return:
Model of autoencoder
"""
n_stacks = len(dims) - 1
# input
sf_layer = Input(shape=(1,), name='size_factors')
x = Input(shape=(dims[0],), name='counts')
h = x
h = GaussianNoise(noise_sd, name='input_noise')(h)
# internal layers in encoder
for i in range(n_stacks-1):
h = Dense(dims[i + 1], kernel_initializer=init, name='encoder_%d' % i)(h)
h = BatchNormalization(center=True, scale=False, name='encoder_batchnorm_%d' % i)(h)
h = Activation(act, name='encoder_act_%d' % i)(h)
# hidden layer
h = Dense(dims[-1], kernel_initializer=init, name='encoder_hidden')(h) # hidden layer, features are extracted from here
h = BatchNormalization(center=True, scale=False, name='encoder_hidden_batchnorm_%d' % i)(h)
h = Activation(act, name='encoder_hidden_act')(h)
# internal layers in decoder
for i in range(n_stacks-1, 0, -1):
h = Dense(dims[i], kernel_initializer=init, name='decoder_%d' % i)(h)
h = BatchNormalization(center=True, scale=False, name='decoder_batchnorm_%d' % i)(h)
h = Activation(act, name='decoder_act_%d' % i)(h)
# output
pi = Dense(dims[0], activation='sigmoid', kernel_initializer=init, name='pi')(h)
disp = Dense(dims[0], activation=DispAct, kernel_initializer=init, name='dispersion')(h)
mean = Dense(dims[0], activation=MeanAct, kernel_initializer=init, name='mean')(h)
output = ColWiseMultLayer(name='output')([mean, sf_layer])
output = SliceLayer(0, name='slice')([output, disp, pi])
return Model(inputs=[x, sf_layer], outputs=output)
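# Added note (not part of the original file): the decoder ends in three gene-wise
# output heads -- 'pi' (sigmoid dropout probability), 'dispersion' (clipped
# softplus via DispAct) and 'mean' (clipped exponential via MeanAct). The mean is
# rescaled per cell by the size factors (ColWiseMultLayer), and SliceLayer(0)
# makes the scaled mean the Keras output while 'pi' and 'dispersion' stay in the
# graph and are fetched by name for the ZINB loss.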
def sampling_gumbel(shape, eps=1e-8):
u = tf.random_uniform(shape, minval=0., maxval=1)
return -tf.log(-tf.log(u+eps)+eps)
def compute_softmax(logits,temp):
z = logits + sampling_gumbel( K.shape(logits) )
return K.softmax( z / temp )
def gumbel_softmax(args):
logits,temp = args
y = compute_softmax(logits,temp)
return y
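# Added background note (not part of the original file): the three functions above
# implement the Gumbel-softmax (Concrete) relaxation. With Gumbel noise
# g = -log(-log(u)), u ~ U(0, 1), compute_softmax returns
#   softmax((logits + g) / temp),
# which approaches a one-hot sample from softmax(logits) as temp -> 0 while
# remaining differentiable for larger temp. In ZINB_AE the two classes per gene
# are (dropout, non-dropout), and the non-dropout channel multiplies the ZINB mean.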
class ZINB_AE(object):
def __init__(self,
dims,
noise_sd=0,
ridge=0,
debug=False,
eps = 1e-20):
self.dims = dims
self.input_dim = dims[0]
self.n_stacks = len(self.dims) - 1
self.noise_sd = noise_sd
self.act = 'relu'
self.ridge = ridge
self.debug = debug
self.eps = eps
self.autoencoder = autoencoder(self.dims, noise_sd=self.noise_sd, act = self.act)
pi = self.autoencoder.get_layer(name='pi').output
disp = self.autoencoder.get_layer(name='dispersion').output
zinb = ZINB(pi, theta=disp, ridge_lambda=self.ridge, debug=self.debug)
self.zinb_loss = zinb.loss
# zero-inflated outputs
tau_input = Input(shape=(self.dims[0],), name='tau_input')
pi_ = self.autoencoder.get_layer('pi').output
mean_ = self.autoencoder.output
pi_log_ = Lambda(lambda x:tf.log(x+self.eps))(pi_)
nondrop_pi_log_ = Lambda(lambda x:tf.log(1-x+self.eps))(pi_)
pi_log_ = Reshape( target_shape=(self.dims[0],1) )(pi_log_)
nondrop_pi_log_ = Reshape( target_shape=(self.dims[0],1) )(nondrop_pi_log_)
logits = Concatenate(axis=-1)([pi_log_,nondrop_pi_log_])
temp_ = RepeatVector( 2 )(tau_input)
temp_ = Permute( (2,1) )(temp_)
samples_ = Lambda( gumbel_softmax,output_shape=(self.dims[0],2,) )( [logits,temp_] )
samples_ = Lambda( lambda x:x[:,:,1] )(samples_)
samples_ = Reshape( target_shape=(self.dims[0],) )(samples_)
output_ = Multiply(name='ZI_output')([mean_, samples_])
self.model = Model(inputs=[self.autoencoder.input[0], self.autoencoder.input[1], tau_input],
outputs=[output_, self.autoencoder.output])
def pretrain(self, x, x_count, batch_size=256, epochs=200, optimizer='adam', ae_file='ae_weights.h5'):
print('...Pretraining autoencoder...')
self.autoencoder.compile(loss=self.zinb_loss, optimizer=optimizer)
es = EarlyStopping(monitor="loss", patience=50, verbose=1)
self.autoencoder.fit(x=x, y=x_count, batch_size=batch_size, epochs=epochs, callbacks=[es], shuffle=True)
self.autoencoder.save_weights(ae_file)
print('Pretrained weights are saved to ./' + str(ae_file))
self.pretrained = True
def fit(self, x, x_count, batch_size=256, maxiter=2e3, ae_weights=None,
loss_weights=[0.01, 1], optimizer='adam', model_file='model_weight.h5'):
self.model.compile(loss={'ZI_output': mse_loss_v2, 'slice': self.zinb_loss}, loss_weights=loss_weights, optimizer=optimizer)
if not self.pretrained and ae_weights is None:
print('...pretraining autoencoders using default hyper-parameters:')
print(' optimizer=\'adam\'; epochs=200')
self.pretrain(x, x_count, batch_size)
self.pretrained = True
elif ae_weights is not None:
self.autoencoder.load_weights(ae_weights)
print('ae_weights is loaded successfully.')
# anneal tau
tau0 = 1.
min_tau = 0.5
anneal_rate = 0.0003
tau = tau0
# es = EarlyStopping(monitor="loss", patience=20, verbose=1)
for e in range(maxiter):
if e % 100 == 0:
tau = max( tau0*np.exp( -anneal_rate * e),min_tau )
tau_in = np.ones( x[0].shape,dtype='float32' ) * tau
print(tau)
print("Epoch %d/%d" % (e, maxiter))
self.model.fit(x=[x[0], x[1], tau_in], y=x_count, batch_size=batch_size, epochs=1, shuffle=True)
self.model.save_weights(model_file)
if __name__ == "__main__":
# setting the hyper parameters
import argparse
parser = argparse.ArgumentParser(description='train',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--data_file', default='data.h5')
parser.add_argument('--pretrain_epochs', default=300, type=int)
parser.add_argument('--max_iters', default=2000, type=int)
parser.add_argument('--gamma', default=.01, type=float)
parser.add_argument('--ae_weights', default=None)
parser.add_argument('--ae_weight_file', default='ae_weights.h5')
parser.add_argument('--model_weight_file', default='model_weights.h5')
args = parser.parse_args()
# load dataset
optimizer = Adam(amsgrad=True)
data_mat = h5py.File(args.data_file)
x = np.array(data_mat['X'])
y = np.array(data_mat['Y'])
true_count = np.array(data_mat['true_count'])
data_mat.close()
x = np.floor(x)
# preprocessing scRNA-seq read counts matrix
adata = sc.AnnData(x)
adata.obs['Group'] = y
adata = read_dataset(adata,
transpose=False,
test_split=False,
copy=True)
adata = normalize(adata,
size_factors=True,
normalize_input=True,
logtrans_input=True)
input_size = adata.n_vars
print(adata.X.shape)
print(y.shape)
x_sd = adata.X.std(0)
x_sd_median = np.median(x_sd)
print("median of gene sd: %.5f" % x_sd_median)
print(args)
zinbae_model = ZINB_AE(dims=[input_size, 64, 32], noise_sd=2.5)
zinbae_model.autoencoder.summary()
zinbae_model.model.summary()
# Pretrain autoencoders before clustering
if args.ae_weights is None:
zinbae_model.pretrain(x=[adata.X, adata.obs.size_factors], x_count=adata.raw.X, batch_size=args.batch_size, epochs=args.pretrain_epochs,
optimizer=optimizer, ae_file=args.ae_weight_file)
zinbae_model.fit(x=[adata.X, adata.obs.size_factors], x_count=[adata.raw.X, adata.raw.X], batch_size=args.batch_size, ae_weights=args.ae_weights,
maxiter=args.max_iters, loss_weights=[args.gamma, 1], optimizer=optimizer, model_file=args.model_weight_file)
# Impute error
x_impute = zinbae_model.autoencoder.predict(x=[adata.X, adata.obs.size_factors])
raw_error = imputate_error(adata.raw.X, true_count, x_raw=adata.raw.X)
imputation_error = imputate_error(x_impute, true_count, x_raw=adata.raw.X)
print("Before imputation error: %.4f, after imputation error: %.4f" % (raw_error, imputation_error))
| 10,280 | 39.636364 | 154 | py |
ZINBAE | ZINBAE-master/loss.py | import numpy as np
import tensorflow as tf
from keras import backend as K
def _nan2zero(x):
return tf.where(tf.is_nan(x), tf.zeros_like(x), x)
def _nan2inf(x):
return tf.where(tf.is_nan(x), tf.zeros_like(x)+np.inf, x)
def _nelem(x):
nelem = tf.reduce_sum(tf.cast(~tf.is_nan(x), tf.float32))
return tf.cast(tf.where(tf.equal(nelem, 0.), 1., nelem), x.dtype)
def _reduce_mean(x):
nelem = _nelem(x)
x = _nan2zero(x)
return tf.divide(tf.reduce_sum(x), nelem)
def mse_loss(y_true, y_pred):
ret = tf.square(y_pred - y_true)
return _reduce_mean(ret)
def mse_loss_v2(y_true, y_pred):
y_true = tf.log(y_true+1)
y_pred = tf.log(y_pred+1)
ret = tf.square(y_pred - y_true)
return _reduce_mean(ret)
class NB(object):
def __init__(self, theta=None, masking=False, scope='nbinom_loss/',
scale_factor=1.0, debug=False):
# for numerical stability
self.eps = 1e-10
self.scale_factor = scale_factor
self.debug = debug
self.scope = scope
self.masking = masking
self.theta = theta
def loss(self, y_true, y_pred, mean=True):
scale_factor = self.scale_factor
eps = self.eps
with tf.name_scope(self.scope):
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32) * scale_factor
if self.masking:
nelem = _nelem(y_true)
y_true = _nan2zero(y_true)
# Clip theta
theta = tf.minimum(self.theta, 1e6)
t1 = tf.lgamma(theta+eps) + tf.lgamma(y_true+1.0) - tf.lgamma(y_true+theta+eps)
t2 = (theta+y_true) * tf.log(1.0 + (y_pred/(theta+eps))) + (y_true * (tf.log(theta+eps) - tf.log(y_pred+eps)))
if self.debug:
assert_ops = [
tf.verify_tensor_all_finite(y_pred, 'y_pred has inf/nans'),
tf.verify_tensor_all_finite(t1, 't1 has inf/nans'),
tf.verify_tensor_all_finite(t2, 't2 has inf/nans')]
tf.summary.histogram('t1', t1)
tf.summary.histogram('t2', t2)
with tf.control_dependencies(assert_ops):
final = t1 + t2
else:
final = t1 + t2
final = _nan2inf(final)
if mean:
if self.masking:
final = tf.divide(tf.reduce_sum(final), nelem)
else:
final = tf.reduce_mean(final)
return final
class ZINB(NB):
def __init__(self, pi, ridge_lambda=0.0, scope='zinb_loss/', **kwargs):
super().__init__(scope=scope, **kwargs)
self.pi = pi
self.ridge_lambda = ridge_lambda
def loss(self, y_true, y_pred, mean=True):
scale_factor = self.scale_factor
eps = self.eps
with tf.name_scope(self.scope):
# reuse existing NB neg.log.lik.
# mean is always False here, because everything is calculated
# element-wise. we take the mean only in the end
nb_case = super().loss(y_true, y_pred, mean=False) - tf.log(1.0-self.pi+eps)
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32) * scale_factor
theta = tf.minimum(self.theta, 1e6)
zero_nb = tf.pow(theta/(theta+y_pred+eps), theta)
zero_case = -tf.log(self.pi + ((1.0-self.pi)*zero_nb)+eps)
result = tf.where(tf.less(y_true, 1e-8), zero_case, nb_case)
ridge = self.ridge_lambda*tf.square(self.pi)
result += ridge
if mean:
if self.masking:
result = _reduce_mean(result)
else:
result = tf.reduce_mean(result)
result = _nan2inf(result)
if self.debug:
tf.summary.histogram('nb_case', nb_case)
tf.summary.histogram('zero_nb', zero_nb)
tf.summary.histogram('zero_case', zero_case)
tf.summary.histogram('ridge', ridge)
return result
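# Added note (not part of the original file): the loss above is the negative
# log-likelihood of the zero-inflated negative binomial mixture
#   P(y) = pi * delta_0(y) + (1 - pi) * NB(y; mu, theta),
# i.e. -log(pi + (1 - pi) * (theta / (theta + mu))^theta) for y == 0 (zero_case)
# and the NB negative log-likelihood minus log(1 - pi) for y > 0 (nb_case),
# plus the ridge term ridge_lambda * pi^2 on the dropout probabilities.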
| 4,141 | 30.142857 | 122 | py |
ZINBAE | ZINBAE-master/layers.py | from keras.engine.topology import Layer
from keras.layers import Lambda
from keras import backend as K
import tensorflow as tf
class ConstantDispersionLayer(Layer):
'''
An identity layer which allows us to inject extra parameters
such as dispersion to Keras models
'''
def __init__(self, **kwargs):
super().__init__(**kwargs)
def build(self, input_shape):
self.theta = self.add_weight(shape=(1, input_shape[1]),
initializer='zeros',
trainable=True,
name='theta')
self.theta_exp = tf.clip_by_value(K.exp(self.theta), 1e-3, 1e4)
super().build(input_shape)
def call(self, x):
return tf.identity(x)
def compute_output_shape(self, input_shape):
return input_shape
class SliceLayer(Layer):
def __init__(self, index, **kwargs):
self.index = index
super().__init__(**kwargs)
def build(self, input_shape):
if not isinstance(input_shape, list):
raise ValueError('Input should be a list')
super().build(input_shape)
def call(self, x):
assert isinstance(x, list), 'SliceLayer input is not a list'
return x[self.index]
def compute_output_shape(self, input_shape):
return input_shape[self.index]
nan2zeroLayer = Lambda(lambda x: tf.where(tf.is_nan(x), tf.zeros_like(x), x))
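# Added descriptive comment: ColWiseMultLayer(name)([mean, sf]) multiplies every
# column (gene) of `mean` (cells x genes) by the per-cell scalar `sf`; the
# reshape/matmul below builds a (cells x genes) matrix in which each row repeats
# that cell's size factor before the element-wise product.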
ColWiseMultLayer = lambda name: Lambda(lambda l: l[0]*(tf.matmul(tf.reshape(l[1], (-1,1)),
tf.ones((1, l[0].get_shape()[1]),
dtype=l[1].dtype))),
name=name)
| 1,798 | 32.314815 | 98 | py |
ZINBAE | ZINBAE-master/ZINBAE0.py | """
Implementation of ZINBAE0 model
"""
from time import time
import numpy as np
from keras.models import Model
import keras.backend as K
from keras.engine.topology import Layer, InputSpec
from keras.layers import Dense, Input, GaussianNoise, Layer, Activation, Lambda, Multiply, BatchNormalization, Reshape, Concatenate, RepeatVector, Permute
from keras.models import Model
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils.vis_utils import plot_model
from keras.callbacks import EarlyStopping
from sklearn.cluster import KMeans
from sklearn import metrics
import h5py
import scanpy.api as sc
from layers import ConstantDispersionLayer, SliceLayer, ColWiseMultLayer
from loss import poisson_loss, NB, ZINB, mse_loss_v2
from preprocess import read_dataset, normalize
import tensorflow as tf
from numpy.random import seed
seed(2211)
from tensorflow import set_random_seed
set_random_seed(2211)
MeanAct = lambda x: tf.clip_by_value(K.exp(x), 1e-5, 1e6)
DispAct = lambda x: tf.clip_by_value(tf.nn.softplus(x), 1e-4, 1e4)
def mean_MSE(x_impute, x_real):
return np.mean(np.square(np.log(x_impute+1)-np.log(x_real+1)))
def imputate_error(x_impute, x_real, x_raw):
x_impute_log = np.log(x_impute[(x_raw-x_real)<0]+1)
x_real_log = np.log(x_real[(x_raw-x_real)<0]+1)
return np.sum(np.abs(x_impute_log-x_real_log))/np.sum(x_real_log>0)
def autoencoder(dims, noise_sd=0, init='glorot_uniform', act='relu'):
"""
Fully connected auto-encoder model, symmetric.
Arguments:
dims: list of number of units in each layer of encoder. dims[0] is input dim, dims[-1] is units in hidden layer.
The decoder is symmetric with encoder. So number of layers of the auto-encoder is 2*len(dims)-1
act: activation applied to the internal encoder/decoder layers and the bottleneck; not applied to the input or output layers
return:
Model of autoencoder
"""
n_stacks = len(dims) - 1
# input
sf_layer = Input(shape=(1,), name='size_factors')
x = Input(shape=(dims[0],), name='counts')
h = x
h = GaussianNoise(noise_sd, name='input_noise')(h)
# internal layers in encoder
for i in range(n_stacks-1):
h = Dense(dims[i + 1], kernel_initializer=init, name='encoder_%d' % i)(h)
h = BatchNormalization(center=True, scale=False, name='encoder_batchnorm_%d' % i)(h)
h = Activation(act, name='encoder_act_%d' % i)(h)
# hidden layer
h = Dense(dims[-1], kernel_initializer=init, name='encoder_hidden')(h) # hidden layer, features are extracted from here
h = BatchNormalization(center=True, scale=False, name='encoder_hidden_batchnorm_%d' % i)(h)
h = Activation(act, name='encoder_hidden_act')(h)
# internal layers in decoder
for i in range(n_stacks-1, 0, -1):
h = Dense(dims[i], kernel_initializer=init, name='decoder_%d' % i)(h)
h = BatchNormalization(center=True, scale=False, name='decoder_batchnorm_%d' % i)(h)
h = Activation(act, name='decoder_act_%d' % i)(h)
# output
pi = Dense(dims[0], activation='sigmoid', kernel_initializer=init, name='pi')(h)
disp = Dense(dims[0], activation=DispAct, kernel_initializer=init, name='dispersion')(h)
mean = Dense(dims[0], activation=MeanAct, kernel_initializer=init, name='mean')(h)
output = ColWiseMultLayer(name='output')([mean, sf_layer])
output = SliceLayer(0, name='slice')([output, disp, pi])
return Model(inputs=[x, sf_layer], outputs=output)
class ZINB_AE0(object):
def __init__(self,
dims,
noise_sd=0,
ridge=0,
debug=False,
eps = 1e-20):
self.dims = dims
self.input_dim = dims[0]
self.n_stacks = len(self.dims) - 1
self.noise_sd = noise_sd
self.act = 'relu'
self.ridge = ridge
self.debug = debug
self.eps = eps
self.autoencoder = autoencoder(self.dims, noise_sd=self.noise_sd, act = self.act)
self.pi = pi = self.autoencoder.get_layer(name='pi').output
self.disp = disp = self.autoencoder.get_layer(name='dispersion').output
zinb = ZINB(pi, theta=disp, ridge_lambda=self.ridge, debug=self.debug)
self.zinb_loss = zinb.loss
self.model = Model(inputs=[self.autoencoder.input[0], self.autoencoder.input[1]],
outputs=self.autoencoder.output)
def pretrain(self, x, x_count, batch_size=256, epochs=200, optimizer='adam', ae_file='ae_weights.h5'):
print('...Pretraining autoencoder...')
self.autoencoder.compile(loss=self.zinb_loss, optimizer=optimizer)
es = EarlyStopping(monitor="loss", patience=50, verbose=1)
self.autoencoder.fit(x=x, y=x_count, batch_size=batch_size, epochs=epochs, callbacks=[es], shuffle=True)
self.autoencoder.save_weights(ae_file)
print('Pretrained weights are saved to ./' + str(ae_file))
self.pretrained = True
def fit(self, x, x_count, batch_size=256, maxiter=2e3, ae_weights=None,
loss_weights=0.1, optimizer='adam', model_file='model_weight.h5'):
class custom_loss(object):
def __init__(self, pi=None, zinb_loss=None):
self.pi = pi
self.zinb_loss = zinb_loss
def custom_loss(self, y_true, y_pred):
loss1 = mse_loss_v2(y_true, (1-self.pi)*y_pred)
loss2 = self.zinb_loss(y_true, y_pred)
return loss1*loss_weights + loss2
loss = custom_loss(self.pi, self.zinb_loss)
self.model.compile(loss=loss.custom_loss, optimizer=optimizer)
if not self.pretrained and ae_weights is None:
print('...pretraining autoencoders using default hyper-parameters:')
print(' optimizer=\'adam\'; epochs=200')
self.pretrain(x, x_count, batch_size)
self.pretrained = True
elif ae_weights is not None:
self.autoencoder.load_weights(ae_weights)
print('ae_weights is loaded successfully.')
self.model.fit(x=[x[0], x[1]], y=x_count, batch_size=batch_size, epochs=maxiter, shuffle=True)
self.model.save_weights(model_file)
if __name__ == "__main__":
# setting the hyper parameters
import argparse
parser = argparse.ArgumentParser(description='train',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--data_file', default='data.h5')
parser.add_argument('--pretrain_epochs', default=300, type=int)
parser.add_argument('--max_iters', default=500, type=int)
parser.add_argument('--gamma', default=.01, type=float)
parser.add_argument('--ae_weights', default=None)
parser.add_argument('--ae_weight_file', default='ae_weights.h5')
parser.add_argument('--model_weight_file', default='model_weights.h5')
args = parser.parse_args()
# load dataset
optimizer = Adam(amsgrad=True)
data_mat = h5py.File(args.data_file)
x = np.array(data_mat['X'])
y = np.array(data_mat['Y'])
true_count = np.array(data_mat['true_count'])
data_mat.close()
x = np.floor(x)
# preprocessing scRNA-seq read counts matrix
adata = sc.AnnData(x)
adata.obs['Group'] = y
adata = read_dataset(adata,
transpose=False,
test_split=False,
copy=True)
adata = normalize(adata,
size_factors=True,
normalize_input=True,
logtrans_input=True)
input_size = adata.n_vars
print(adata.X.shape)
print(y.shape)
x_sd = adata.X.std(0)
x_sd_median = np.median(x_sd)
print("median of gene sd: %.5f" % x_sd_median)
print(args)
zinbae0_model = ZINB_AE0(dims=[input_size, 64, 32], noise_sd=2.5)
zinbae0_model.autoencoder.summary()
zinbae0_model.model.summary()
# Pretrain autoencoders before clustering
if args.ae_weights is None:
zinbae0_model.pretrain(x=[adata.X, adata.obs.size_factors], x_count=adata.raw.X, batch_size=args.batch_size, epochs=args.pretrain_epochs,
optimizer=optimizer, ae_file=args.ae_weight_file)
zinbae0_model.fit(x=[adata.X, adata.obs.size_factors], x_count=adata.raw.X, batch_size=args.batch_size, ae_weights=args.ae_weights,
maxiter=args.max_iters, loss_weights=args.gamma, optimizer=optimizer, model_file=args.model_weight_file)
# Impute error
x_impute = zinbae0_model.autoencoder.predict(x=[adata.X, adata.obs.size_factors])
raw_error = imputate_error(adata.raw.X, true_count, x_raw=adata.raw.X)
imputation_error = imputate_error(x_impute, true_count, x_raw=adata.raw.X)
print("Before imputation error: %.4f, after imputation error: %.4f" % (raw_error, imputation_error))
| 8,888 | 39.040541 | 154 | py |
ZINBAE | ZINBAE-master/preprocess.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle, os, numbers
import numpy as np
import scipy as sp
import pandas as pd
import scanpy.api as sc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
#TODO: Fix this
class AnnSequence:
def __init__(self, matrix, batch_size, sf=None):
self.matrix = matrix
if sf is None:
self.size_factors = np.ones((self.matrix.shape[0], 1),
dtype=np.float32)
else:
self.size_factors = sf
self.batch_size = batch_size
def __len__(self):
return len(self.matrix) // self.batch_size
def __getitem__(self, idx):
batch = self.matrix[idx*self.batch_size:(idx+1)*self.batch_size]
batch_sf = self.size_factors[idx*self.batch_size:(idx+1)*self.batch_size]
# return an (X, Y) pair
return {'count': batch, 'size_factors': batch_sf}, batch
def read_dataset(adata, transpose=False, test_split=False, copy=False):
if isinstance(adata, sc.AnnData):
if copy:
adata = adata.copy()
elif isinstance(adata, str):
adata = sc.read(adata)
else:
raise NotImplementedError
norm_error = 'Make sure that the dataset (adata.X) contains unnormalized count data.'
assert 'n_count' not in adata.obs, norm_error
if adata.X.size < 50e6: # check if adata.X is integer only if array is small
if sp.sparse.issparse(adata.X):
assert (adata.X.astype(int) != adata.X).nnz == 0, norm_error
else:
assert np.all(adata.X.astype(int) == adata.X), norm_error
if transpose: adata = adata.transpose()
if test_split:
train_idx, test_idx = train_test_split(np.arange(adata.n_obs), test_size=0.1, random_state=42)
spl = pd.Series(['train'] * adata.n_obs)
spl.iloc[test_idx] = 'test'
adata.obs['DCA_split'] = spl.values
else:
adata.obs['DCA_split'] = 'train'
adata.obs['DCA_split'] = adata.obs['DCA_split'].astype('category')
print('### Autoencoder: Successfully preprocessed {} genes and {} cells.'.format(adata.n_vars, adata.n_obs))
return adata
def normalize(adata, filter_min_counts=True, size_factors=True, normalize_input=True, logtrans_input=True):
if filter_min_counts:
sc.pp.filter_genes(adata, min_counts=1)
sc.pp.filter_cells(adata, min_counts=1)
if size_factors or normalize_input or logtrans_input:
adata.raw = adata.copy()
else:
adata.raw = adata
if size_factors:
sc.pp.normalize_per_cell(adata)
adata.obs['size_factors'] = adata.obs.n_counts / np.median(adata.obs.n_counts)
else:
adata.obs['size_factors'] = 1.0
if logtrans_input:
sc.pp.log1p(adata)
if normalize_input:
sc.pp.scale(adata)
return adata
def read_genelist(filename):
genelist = list(set(open(filename, 'rt').read().strip().split('\n')))
assert len(genelist) > 0, 'No genes detected in genelist file'
print('### Autoencoder: Subset of {} genes will be denoised.'.format(len(genelist)))
return genelist
def write_text_matrix(matrix, filename, rownames=None, colnames=None, transpose=False):
if transpose:
matrix = matrix.T
rownames, colnames = colnames, rownames
pd.DataFrame(matrix, index=rownames, columns=colnames).to_csv(filename,
sep='\t',
index=(rownames is not None),
header=(colnames is not None),
float_format='%.6f')
def read_pickle(inputfile):
return pickle.load(open(inputfile, "rb"))
| 4,580 | 33.969466 | 112 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Pure Python/IKS.py | from Treap import Treap
from math import log
class IKS:
def __init__(self):
self.treap = None
self.n = [0, 0]
@staticmethod
def KSThresholdForPValue(pvalue, N):
'''Threshold for KS Test given a p-value
Args:
pval (float): p-value.
N (int): the size of the samples.
Returns:
Threshold t to compare groups 0 and 1. The null-hypothesis is discarded if KS() > t.
'''
ca = (-0.5 * log(pvalue)) ** 0.5
return ca * (2.0 * N / N ** 2) ** 0.5
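# Added note (not part of the original file): for two samples of equal size N the
# two-sample KS critical value is c(alpha) * sqrt((N + N) / (N * N))
# = c(alpha) * sqrt(2 / N), with c(alpha) = sqrt(-0.5 * ln(alpha)); Test() below
# evaluates the same expression.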
@staticmethod
def CAForPValue(pvalue):
'''ca for KS Test given a p-value
Args:
pval (float): p-value.
Returns:
The "ca" value that can be used to compute a threshold for KS().
'''
return (-0.5 * log(pvalue)) ** 0.5
def KS(self):
'''Kolmogorov-Smirnov statistic. Both groups must have the same number of observations.
Returns:
The KS statistic D.
'''
assert(self.n[0] == self.n[1])
N = self.n[0]
if N == 0:
return 0
return max(self.treap.max_value, -self.treap.min_value) / N
def Kuiper(self):
'''Kuiper statistic. Both groups must have the same number of observations.
Returns:
The Kuiper statistic.
'''
assert(self.n[0] == self.n[1])
N = self.n[0]
if N == 0:
return 0
return (self.treap.max_value - self.treap.min_value) / N
def Add(self, obs, group):
'''Insert new observation into one of the groups.
Args:
obs: the value of the observation. Tip: a tuple (actual value, random value) is recommended when there is overlap between groups or if values are not guaranteed to be mostly unique.
group (int): which group the observation belongs to. Must be either 0 or 1.
'''
group = 0 if group == 2 else group
assert(group == 0 or group == 1)
key = (obs, group)
self.n[group] += 1
left, left_g, right, val = None, None, None, None
left, right = Treap.SplitKeepRight(self.treap, key)
left, left_g = Treap.SplitGreatest(left)
val = 0 if left_g is None else left_g.value
left = Treap.Merge(left, left_g)
right = Treap.Merge(Treap(key, val), right)
Treap.SumAll(right, 1 if group == 0 else -1)
self.treap = Treap.Merge(left, right)
def Remove(self, obs, group):
'''Remove observation from one of the groups.
Args:
obs: the value of the observation. Must be identical to a previously inserted observation (including the random element of a tuple, if this was the case).
group (int): which group the observation belongs to. Must be either 0 or 1.
'''
group = 0 if group == 2 else group
assert(group == 0 or group == 1)
key = (obs, group)
self.n[group] -= 1
left, right, right_l = None, None, None
left, right = Treap.SplitKeepRight(self.treap, key)
right_l, right = Treap.SplitSmallest(right)
if right_l is not None and right_l.key == key:
Treap.SumAll(right, -1 if group == 0 else 1)
else:
right = Treap.Merge(right_l, right)
self.treap = Treap.Merge(left, right)
def Test(self, ca = 1.95):
'''Test whether the reference and sliding window follow different probability distributions according to the KS Test.
Args:
ca: ca is a parameter used to calculate the threshold for the Kolmogorov-Smirnov statistic. The default value corresponds to a p-value of 0.001. Use IKS.CAForPValue to obtain an appropriate ca.
Returns:
True if we **reject** the null-hypothesis that states that both windows have the same distribution. In other words, we can consider that the windows have now different distributions.
'''
ca = ca or 1.95
n = self.n[0]
return self.KS() > ca * (2 * n / n ** 2) ** 0.5
IKS.AddObservation = IKS.Add
IKS.RemoveObservation = IKS.Remove
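# Added usage sketch (not part of the original file); assumes both samples are
# fed in pairs so that they stay equally sized, and uses the (value, random)
# tuple trick recommended in the Add() docstring:
#
#   from random import random
#   iks = IKS()
#   for _ in range(100):
#       iks.Add((random(), random()), 0)
#       iks.Add((random(), random()), 1)
#   print iks.KS(), iks.Kuiper(), iks.Test(IKS.CAForPValue(0.01))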
| 3,778 | 29.475806 | 199 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Pure Python/Treap.py | from random import random
class Treap:
def __init__(self, key, value = 0):
self.key = key
self.value = value
self.priority = random()
self.size = 1
self.height = 1
self.lazy = 0
self.max_value = value
self.min_value = value
self.left = None
self.right = None
@staticmethod
def SumAll(node, value):
if node is None:
return
node.value += value
node.max_value += value
node.min_value += value
node.lazy += value
@classmethod
def Unlazy(cls, node):
cls.SumAll(node.left, node.lazy)
cls.SumAll(node.right, node.lazy)
node.lazy = 0
@classmethod
def Update(cls, node):
if node is None:
return
cls.Unlazy(node)
node.size = 1
node.height = 0
node.max_value = node.value
node.min_value = node.value
if node.left is not None:
node.size += node.left.size
node.height = node.left.height
node.max_value = max(node.max_value, node.left.max_value)
node.min_value = min(node.min_value, node.left.min_value)
if node.right is not None:
node.size += node.right.size
node.height = max(node.height, node.right.height)
node.max_value = max(node.max_value, node.right.max_value)
node.min_value = min(node.min_value, node.right.min_value)
node.height += 1
@classmethod
def SplitKeepRight(cls, node, key):
if node is None:
return None, None
left, right = None, None
cls.Unlazy(node)
if key <= node.key:
left, node.left = cls.SplitKeepRight(node.left, key)
right = node
else:
node.right, right = cls.SplitKeepRight(node.right, key)
left = node
cls.Update(left)
cls.Update(right)
return left, right
@classmethod
def Merge(cls, left, right):
if left is None:
return right
if right is None:
return left
node = None
if left.priority > right.priority:
cls.Unlazy(left)
left.right = cls.Merge(left.right, right)
node = left
else:
cls.Unlazy(right)
right.left = cls.Merge(left, right.left)
node = right
cls.Update(node)
return node
@classmethod
def SplitSmallest(cls, node):
if node is None:
return None, None
left, right = None, None
cls.Unlazy(node)
if node.left is not None:
left, node.left = cls.SplitSmallest(node.left)
right = node
else:
right = node.right
node.right = None
left = node
cls.Update(left)
cls.Update(right)
return left, right
@classmethod
def SplitGreatest(cls, node):
if node is None:
return None, None
cls.Unlazy(node)
if node.right is not None:
node.right, right = cls.SplitGreatest(node.right)
left = node
else:
left = node.left
node.left = None
right = node
cls.Update(left)
cls.Update(right)
return left, right
@staticmethod
def Size(node):
return 0 if node is None else node.size
@staticmethod
def Height(node):
return 0 if node is None else node.height
@classmethod
def _ToList(cls, node, extractor, _list = None):
if _list is None:
_list = []
if node is None:
return _list
cls.Unlazy(node)
cls._ToList(node.left, extractor, _list)
_list.append(extractor(node))
cls._ToList(node.right, extractor, _list)
return _list
@classmethod
def KeysToList(cls, node, _list = None):
extractor = lambda x: x.key
return cls._ToList(node, extractor, _list)
@classmethod
def ValuesToList(cls, node, _list = None):
extractor = lambda x: x.value
return cls._ToList(node, extractor, _list)
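# Added note (not part of the original file): this treap keeps the keys
# (obs, group) ordered and stores in `value` the running difference between the
# empirical CDFs of the two groups; the `lazy` range updates (SumAll applied to a
# split suffix) let IKS.Add/Remove shift that difference for all keys greater
# than the inserted or removed observation in O(log n), and max_value/min_value
# give the KS and Kuiper statistics without a full traversal.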
| 3,699 | 21.02381 | 64 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Python C++ Wrapper/ForgettingBuffer.py | class Node:
def __init__(self, value):
self.value = value
self.next = None
class ForgettingBuffer:
def __init__(self, values):
self.first = None
self.last = None
for val in values:
if self.first == None:
self.first = Node(val)
self.last = self.first
else:
self.last.next = Node(val)
self.last = self.last.next
def __iter__(self):
cur = self.first
while cur != None:
yield cur.value
cur = cur.next
def Increment(self, value):
first_value = self.first.value
self.first = self.first.next
self.last.next = Node(value)
self.last = self.last.next
return first_value
Add = Increment
__call__ = Increment
def Values(self):
return list(self)
if __name__ == "__main__":
fb = ForgettingBuffer([1, 2, 3, 4, 5])
for val in fb:
print(val)
fb(10)
fb(11)
fb(12)
print(list(fb))
print(fb.Values())
| 930 | 18.808511 | 40 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Python C++ Wrapper/IKS.py | from cffi import FFI
ffi = FFI()
ffi.cdef("""
typedef struct {
void * pointer;
} IKS_WrappedPointer;
IKS_WrappedPointer IKS_NewGeneratorWithSeed(unsigned seed);
IKS_WrappedPointer IKS_NewGenerator(void);
void IKS_DeleteGenerator(IKS_WrappedPointer pointer);
IKS_WrappedPointer IKS_NewIKS(IKS_WrappedPointer generatorPointer);
void IKS_DeleteIKS(IKS_WrappedPointer pointer);
int IKS_Test(IKS_WrappedPointer pointer, double ca);
double IKS_KS(IKS_WrappedPointer pointer);
double IKS_Kuiper(IKS_WrappedPointer pointer);
void IKS_AddObservation(IKS_WrappedPointer pointer, double obs, int which_sample);
void IKS_RemoveObservation(IKS_WrappedPointer pointer, double obs, int which_sample);
void IKS_AddCompositeObservation(IKS_WrappedPointer pointer, double obs, double obs_p2, int which_sample);
void IKS_RemoveCompositeObservation(IKS_WrappedPointer pointer, double obs, double obs_p2, int which_sample);
double IKS_KSThresholdForPValue(double pvalue, int N);
double IKS_CAForPValue(double pvalue);
""")
clib = ffi.dlopen("iks.dll")
class Generator:
def __init__(self, seed = None):
if seed == None:
self.wp = clib.IKS_NewGenerator()
else:
self.wp = clib.IKS_NewGeneratorWithSeed(seed)
def __del__(self):
clib.IKS_DeleteGenerator(self.wp)
global_generator = Generator()
class IKS:
def __init__(self, generator = global_generator):
self.wp = clib.IKS_NewIKS(generator.wp)
def __del__(self):
clib.IKS_DeleteIKS(self.wp)
def AddObservation(self, obs, sample):
'''Insert new observation into one of the groups.
Args:
obs: the value of the observation. Tip: a tuple (actual value, random value) is recommended when there is overlap between groups or if values are not guaranteed to be mostly unique.
group (int): which group the observation belongs to. Must be either 0 or 1.
'''
if isinstance(obs, tuple):
clib.IKS_AddCompositeObservation(self.wp, obs[0], obs[1], sample)
else:
clib.IKS_AddObservation(self.wp, obs, sample)
def RemoveObservation(self, obs, sample):
'''Remove observation from one of the groups.
Args:
obs: the value of the observation. Must be identical to a previously inserted observation (including the random element of a tuple, if this was the case).
group (int): which group the observation belongs to. Must be either 0 or 1.
'''
if isinstance(obs, tuple):
clib.IKS_RemoveCompositeObservation(self.wp, obs[0], obs[1], sample)
else:
clib.IKS_RemoveObservation(self.wp, obs, sample)
def KS(self):
'''Kolmogorov-Smirnov statistic. Both groups must have the same number of observations.
Returns:
The KS statistic D.
'''
return clib.IKS_KS(self.wp)
def Kuiper(self):
'''Kuiper statistic. Both groups must have the same number of observations.
Returns:
The Kuiper statistic.
'''
return clib.IKS_Kuiper(self.wp)
def Test(self, ca = 1.95):
'''Test whether the reference and sliding window follow different probability distributions according to the KS Test.
Args:
ca: ca is a parameter used to calculate the threshold for the Kolmogorov-Smirnov statistic. The default value corresponds to a p-value of 0.001. Use IKS.CAForPValue to obtain an appropriate ca.
Returns:
True if we **reject** the null-hypothesis that states that both windows have the same distribution. In other words, we can consider that the windows have now different distributions.
'''
return clib.IKS_Test(self.wp, ca) == 1
@staticmethod
def KSThresholdForPValue(pvalue, N):
'''Threshold for KS Test given a p-value
Args:
pval (float): p-value.
N (int): the size of the samples.
Returns:
Threshold t to compare groups 0 and 1. The null-hypothesis is discarded if KS() > t.
'''
return clib.IKS_KSThresholdForPValue(pvalue, N)
@staticmethod
def CAForPValue(pvalue):
'''ca for KS Test given a p-value
Args:
pval (float): p-value.
Returns:
The "ca" value that can be used to compute a threshold for KS().
'''
return clib.IKS_CAForPValue(pvalue)
IKS.Add = IKS.AddObservation
IKS.Remove = IKS.RemoveObservation
if __name__ == "__main__":
import random
iks = IKS()
for i in range(0, 10):
iks.AddObservation(i, 0)
iks.AddObservation(i, 1)
print(iks.KS())
print(iks.Kuiper())
print(iks.Test())
iks = IKS()
for i in range(0, 10):
iks.AddObservation(random.random(), 0)
iks.AddObservation(random.random(), 1)
print(iks.KS())
print(iks.Kuiper())
print(iks.Test())
| 4,612 | 30.813793 | 199 | py |
pyterpol | pyterpol-master/grid_to_binary.py |
import os
import argparse
import numpy as np
def main():
ps = argparse.ArgumentParser()
ps.add_argument('--remove', action='store_true', default=False, help='Removes ascii files.')
ps.add_argument('--overwrite', action='store_true', default=False, help='Overwrites binary files -- mandatory for every machine swap. ')
args = ps.parse_args()
print args
# get grids directory
for gdname in ['grids', 'grids_ABS']:
cwd = os.getcwd()
gdir = os.path.join(cwd, gdname)
dl = os.listdir(gdir)
# go through each grid directory
for direc in dl:
# path to directory
path = os.path.join(gdir, direc)
# directories only
if not os.path.isdir(path):
continue
# list of spectra
gl = os.path.join(path, 'gridlist')
# load the list
synlist = np.loadtxt(gl, dtype=str, unpack=True, usecols=[0])
# transform each spectrum to binary
for synspec in synlist:
# define name of the binary file
bin_synspec = synspec + '.npz'
if os.path.isfile(os.path.join(path, bin_synspec)) and not args.overwrite:
print "File: %s exists." % bin_synspec
if os.path.isfile(os.path.join(path, synspec)) and args.remove:
os.remove(os.path.join(path, synspec))
continue
# load the ascii spectrum and save it as binary file
w, i = np.loadtxt(os.path.join(path, synspec), unpack=True, usecols=[0, 1])
np.savez(os.path.join(path, bin_synspec), w, i)
if os.path.isfile(os.path.join(path, synspec)) and args.remove:
os.remove(os.path.join(path, synspec))
if __name__ == '__main__':
main()
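# Added usage note (not part of the original script): np.savez above stores the
# two positional arrays under the default names 'arr_0' (wavelength) and
# 'arr_1' (intensity), so a saved spectrum can be read back with, e.g.:
#
#   data = np.load('some_grid_spectrum.npz')   # hypothetical file name
#   w, i = data['arr_0'], data['arr_1']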
| 1,998 | 34.696429 | 140 | py |
pyterpol | pyterpol-master/fitting/fitter.py | import os
import nlopt
import emcee
# import warnings
import numpy as np
from scipy.optimize import fmin
from scipy.optimize import fmin_slsqp
try:
from scipy.optimize import differential_evolution
except ImportError as ex:
print ex
differential_evolution = None
from pyterpol.synthetic.auxiliary import parlist_to_list
from pyterpol.synthetic.auxiliary import string2bool
from pyterpol.synthetic.auxiliary import read_text_file
from pyterpol.synthetic.auxiliary import renew_file
fitters = dict(
sp_nelder_mead=dict(par0type='value',
optional_kwargs=['xtol', 'ftol', 'maxiter', 'maxfun'],
object=fmin,
uses_bounds=False,
info='Nelder-Mead simplex algorithm. '
'Implementation: http://docs.scipy.org/doc/scipy-0.16.1/reference/generated/'
'scipy.optimize.fmin.html#scipy.optimize.fmin. Ineffective for high-dimensional'
' parameter space.'),
sp_slsqp=dict(par0type='value',
optional_kwargs=['ftol'],
object=fmin_slsqp,
uses_bounds=True,
info='Sequential Least Squares Programming. '
'Implementation: http://docs.scipy.org/doc/scipy-0.16.1/reference/generated/'
'scipy.optimize.fmin.html#scipy.optimize.fmin. Ineffective for high-dimensional'
' parameter space.'),
sp_diff_evol=dict(par0type='limit',
optional_kwargs=['popsize', 'tol', 'strategy', 'maxiter'],
object=differential_evolution,
uses_bounds=False,
info='Differential evolution algorithm. '
'Implementation: http://docs.scipy.org/doc/scipy-0.16.1/reference/generated/'
'scipy.optimize.fmin.html#scipy.optimize.fmin.'),
nlopt_nelder_mead=dict(par0type='value',
optional_kwargs=['xtol', 'ftol', 'maxfun'],
object=None,
environment=nlopt.LN_NELDERMEAD,
uses_bounds=True,
info='Nelder-Mead Simplex. Implementation NLOPT: Steven G. Johnson, '
'The NLopt nonlinear-optimization package, http://ab-initio.mit.edu/nlopt.'),
nlopt_sbplx=dict(par0type='value',
optional_kwargs=['xtol', 'ftol', 'maxfun'],
object=None,
environment=nlopt.LN_SBPLX,
uses_bounds=True,
info='Sbplx - a variation of the Subplex algorithm by Tom Rowan. '
'Implementation NLOPT: Steven G. Johnson, The NLopt '
'nonlinear-optimization package, http://ab-initio.mit.edu/nlopt.'),
)
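# Added illustration (not part of the original module): the registry above is what
# Fitter.choose_fitter consults, e.g. (with `pars` being a list of pyterpol
# Parameter objects -- an assumption for this sketch):
#
#   fitter = Fitter(debug=False)
#   fitter.choose_fitter('nlopt_nelder_mead', fitparams=pars, ftol=1e-6)
#
# keyword arguments are validated against the 'optional_kwargs' entry, and the
# initial values/bounds are taken from the parameters according to 'par0type'
# and 'uses_bounds'.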
class Fitter(object):
"""
"""
def __init__(self, name=None, fitparams=None, verbose=False, debug=False, fitlog='fit.log', **kwargs):
"""
:param name: name of the fitting environment
:param fitparams a list of Parameter types
:param verbose whether to save detailed chi_square information
:param debug: debugmode
:param fitlog: file in which the fitting is logged
:param kwargs: fitting environment control keywords
:return:
"""
# pass the parameters
if fitparams is None:
self.fitparams = []
else:
self.fitparams = fitparams
self.verbose = verbose
self.fitlog = fitlog
self.debug = debug
self.fittername = name
# empty parameters
self.fitter = None
self.fit_kwargs = {}
self.par0 = []
self.uses_bounds = False
self.family = None
self.vmins = None
self.vmaxs = None
self.nlopt_environment = None
# empty list of all trial fits
self.iters = []
self.parameter_identification = None
# iteration number
self.iter_number = 0
# choose a fitter if one
# was given
if name is not None:
self.choose_fitter(name, **kwargs)
def __call__(self, func, *args):
"""
:param func:
:param args:
:return:
"""
# empty the fitlog
renew_file(self.fitlog)
# reset the counter and clear the fitting
self.iter_number = 0
self.iters = []
# debug
if self.debug:
print "Started fitted with fitting environment: %s\n" \
" vector of parameters: %s and optional" \
" enviromental parameters: %s." % (self.fittername, str(self.par0), str(self.fit_kwargs))
if len(self.par0) == 0:
raise ValueError('No initial vector of parameters (wrapped in Parameter class) was passed.')
# check that initial parameters do not lie outside the fitted region.
self.check_initial_parameters()
# run fitting
if self.family == 'sp':
if self.uses_bounds:
bounds = [[vmin, vmax] for vmin, vmax in zip(self.vmins, self.vmaxs)]
self.result = self.fitter(func, self.par0, args=args, bounds=bounds, **self.fit_kwargs)
else:
self.result = self.fitter(func, self.par0, args=args, **self.fit_kwargs)
elif self.family == 'nlopt':
# define function for the nlopt fitter
def f(x, grad):
return func(x, *args)
# check that we are searching minimum
self.fitter.set_min_objective(f)
# the fitting
self.result = self.fitter.optimize(self.par0)
# we want only set of parameters for the result
# very in elegant
if not isinstance(self.result, (list, tuple, type(np.array([])))):
self.result = self.result.x
def __str__(self):
"""
String representation of the class.
:return:
"""
string = ''
string += 'Fitter: %s optional_arguments: %s\n' % (self.fittername, str(self.fit_kwargs))
string += 'Initial parameters:'
for i, par in enumerate(self.fitparams):
string += "(%s, g.): (%s, %s); " % (par['name'], str(self.par0[i]), str(par['group']))
if (i + 1) % 5 == 0:
string += '\n'
string += '\n'
return string
def append_iteration(self, iter):
"""
Appends each iteration.
:param iter the iteration
:return:
"""
# TODO this function has to be improved.
self.iter_number += 1
# print iter
self.iters.append(iter)
# if the number of iterations exceeds a certain number
# they are written to a file
if self.iter_number % 1000 < 1:
self.flush_iters()
self.iters = []
def clear_all(self):
"""
:return:
"""
self.__init__()
def check_initial_parameters(self):
"""
Checks that initial parameters do not lie outside the fitted region.
:return:
"""
p0 = self.par0
for i, p in enumerate(self.fitparams):
# differential evolution uses interval as a p0, and
# this function tests only floats
if isinstance(p0[i], (list, tuple)):
continue
if (p0[i] > p['vmax']) | (p0[i] < p['vmin']):
raise ValueError('Parameter %s (group %i) lies outside the fitted regions! %f not in (%f, %f)' %
(p['name'], p['group'], p['value'], p['vmin'], p['vmax']))
def choose_fitter(self, name, fitparams=None, init_step=None, **kwargs):
"""
Selects a fitter from the list of available ones and
prepares the fitting variables.
:param name: name of the fitting environment
:param fitparams: list of fitted parameters ech wrapped within Parameter class
:param kwargs: keyword arguments controlling the respective fitting environement
:return:
"""
# clear the class first
self.clear_all()
# check the input
if name.lower() not in fitters.keys():
raise ValueError('Fitter: %s is unknown. Registered fitters are:\n %s.' % (name, self.list_fitters()))
else:
self.fitter = fitters[name]['object']
self.fittername = name
for key in kwargs.keys():
if key not in fitters[name]['optional_kwargs']:
raise KeyError('The parameter: %s is not listed among '
'optional_kwargs for fitter: %s. The eligible'
'optional_kwargs are: %s' % (key, name, str(fitters[name]['optional_kwargs'])))
else:
self.fit_kwargs[key] = kwargs[key]
if self.debug:
print 'Choosing environment: %s\n' \
' environmental parameters: %s.' % (name, str(self.fit_kwargs))
# if we want to change the fitted parameters
if fitparams is None:
fitparams = self.fitparams
else:
self.fitparams = fitparams
# set up initial value
if fitters[name]['par0type'] == 'value':
self.par0 = parlist_to_list(fitparams, property='value')
if fitters[name]['par0type'] == 'limit':
vmins = parlist_to_list(fitparams, property='vmin')
vmaxs = parlist_to_list(fitparams, property='vmax')
self.par0 = [[vmin, vmax] for vmin, vmax in zip(vmins, vmaxs)]
if self.debug:
print 'Setting initial parameters: %s' % str(self.par0)
# checks that there are any fitting boundaries
if fitters[name]['uses_bounds']:
self.uses_bounds = True
self.vmins = parlist_to_list(fitparams, property='vmin')
self.vmaxs = parlist_to_list(fitparams, property='vmax')
else:
self.uses_bounds = False
# set up family
self.family = name.split('_')[0]
if self.family == 'nlopt':
self.nlopt_environment = fitters[name]['environment']
self.setup_nlopt(init_step=init_step)
def flush_iters(self, f=None):
"""
Flushes all records within self.iters to a file
:param f: filename
:return:
"""
if f is None:
f = self.fitlog
# create a block of lines
lines = []
# if the file is empty add header
# print os.path.getsize(self.fitlog)
if os.path.getsize(self.fitlog) == 0:
# construct the header
header = self.make_header()
lines.append(header)
for row in self.iters:
line = ''
# create a row of parameters + chi2
p = row['parameters']
d = np.zeros(len(p)+1)
d[:-1] = p
d[-1] = row['chi2']
for i in range(0, len(d)):
line += '%s ' % str(d[i])
line += '\n'
# append the row
lines.append(line)
# print line
# write the to a file
ofile = open(f, 'a')
ofile.writelines(lines)
ofile.close()
def run_mcmc(self, chi_square, chain_file, fitparams, nwalkers, niter, *args):
"""
:param chi_square
:param fitparams
:param nwalkers
:param niter
:param args
:return:
"""
def lnlike(pars, *args):
"""
Log-likelihood of the model, i.e. -chi^2/2.
:param pars:
:param args:
:return:
"""
return -0.5*chi_square(pars, *args)
# define the boundaries and the priors
def lnprior(pars):
"""
Log-prior: flat inside the parameter boundaries, -inf outside.
:param pars:
:return:
"""
for p, vmin, vmax in zip(pars, self.vmins, self.vmaxs):
if (p < vmin) | (p > vmax):
return -np.inf
return 0.0
def lnprob(pars, *args):
"""
The full log-posterior: log-prior plus log-likelihood.
:param pars:
:param args:
:return:
"""
lp = lnprior(pars)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(pars, *args)
# get the dimensions
ndim = len(fitparams)
# initialize the sampler
pos = np.array([[wmin + (wmax - wmin) * np.random.rand() for wmin, wmax in zip(self.vmins, self.vmaxs)]
for i in range(nwalkers)])
# setup the sampler
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=args)
# initialize the file - create the header
if self.parameter_identification is not None:
header = [self.make_header()]
else:
header = ['']
# write the header and close the file
ofile = open(chain_file, 'w')
ofile.writelines(header)
ofile.close()
# run the sampler
for result in sampler.sample(pos, iterations=niter, storechain=False):
position = result[0]
ofile = open(chain_file, 'a')
for k in range(position.shape[0]):
ofile.write("%d %s %f\n" % (k, " ".join(['%.12f' % i for i in position[k]]), result[1][k]))
ofile.close()
@staticmethod
def list_fitters():
"""
Lists all fitters.
:return: string : a list of all fitters.
"""
string = '\n'.rjust(100, '=')
for key in fitters.keys():
string += "Name: %s\n" % key
string += "Optional parameters: %s\n" % str(fitters[key]['optional_kwargs'])
string += "Uses boundaries: %s\n" % str(fitters[key]['uses_bounds'])
string += "Description: %s\n" % fitters[key]['info']
string += '\n'.rjust(100, '=')
return string
def load(self, f):
"""
Loads the text representation of the class from
a file f.
:param f
:return:
"""
# read the file
lines = read_text_file(f)
data_start = len(lines)
for i, l in enumerate(lines):
if l.find('FITTER') > -1:
data_start = i
break
# check that there are actually some data in the file
if data_start >= len(lines):
return False
# create the class
fitter = Fitter()
name = None
fit_kwargs = {}
# from here the file is actually being read
for i, l in enumerate(lines[data_start+1:]):
# once we reach FITTER again we end
if l.find('FITTER') > -1:
break
# split the line
d = l.split()
# print d
# save the name
if d[0].find('fitter:') > -1:
name = d[1]
# save the kwargs
elif d[0].find('fit_parameters:') > -1:
d = d[1:]
if len(d) < 2:
continue
fit_kwargs = {d[i].strip(':'): float(d[i+1]) for i in range(0, len(d), 2)}
# do the same for environmental keys
if d[0].find('env_keys:') > -1:
# the first string is just identification
d = d[1:]
# ensure correct types
recs = ['debug', 'verbose', 'fitlog']
cast_types = [string2bool, string2bool, str]
cdict = {d[i].rstrip(':'): d[i+1] for i in range(0, len(d), 2)}
for k in cdict.keys():
if k in recs:
i = recs.index(k)
ctype = cast_types[i]
cdict[k] = ctype(cdict[k])
# assign the values
setattr(fitter, k, cdict[k])
# choose the fitter
if name != 'None':
fitter.choose_fitter(name, **fit_kwargs)
else:
return False
# finally assign everything to self
attrs = ['debug', 'fittername', 'verbose', 'fitlog', 'fit_kwargs']
for attr in attrs:
setattr(self, attr, getattr(fitter, attr))
# if we got here, we loaded the data
return True
def make_header(self):
"""
Creates the header for output file.
:return:
"""
header = ''
for key in self.parameter_identification.keys():
if key != 'value':
header += '# %s: ' % key
for rec in self.parameter_identification[key]:
header += '%s ' % str(rec)
header += '\n'
return header
def save(self, ofile):
"""
Saves the class. It should be retrievable from the file.
Since this class really cannot exist without the
interface, it really saves only the selected fitting
environment and fitted kwargs.
:param ofile:
:return:
"""
# Open the file
if isinstance(ofile, str):
ofile = open(ofile, 'w+')
# row announcing the fitter
string = ' FITTER '.rjust(105, '#').ljust(200, '#') + '\n'
# name of the fitter
string += 'fitter: %s\n' % self.fittername
string += 'fit_parameters: '
# writes the fitting kwargs
for fkey in self.fit_kwargs:
string += '%s: %s ' % (fkey, str(self.fit_kwargs[fkey]))
string += '\n'
# writes environmental keys
enviromental_keys = ['debug', 'verbose', 'fitlog']
string += 'env_keys: '
for fkey in enviromental_keys:
string += "%s: %s " % (fkey, str(getattr(self, fkey)))
string += '\n'
string += ' FITTER '.rjust(105, '#').ljust(200, '#') + '\n'
# write the remaining parameters
ofile.writelines(string)
def setup_nlopt(self, init_step=None):
"""
Sets up the the NLOPT fitter.
:return:
"""
if self.debug:
print "Setting up NLOPT minimizer."
# length of the fitted parameters
n = len(self.fitparams)
# configures the fitter
self.fitter = nlopt.opt(self.nlopt_environment, n)
# setup parameters for fitting termination
for key in self.fit_kwargs.keys():
if key == 'xtol':
self.fitter.set_xtol_rel(self.fit_kwargs[key])
if key == 'ftol':
self.fitter.set_ftol_rel(self.fit_kwargs[key])
if key == 'maxfun':
self.fitter.set_maxeval(self.fit_kwargs[key])
# setup boundaries
if self.uses_bounds:
self.fitter.set_lower_bounds(self.vmins)
self.fitter.set_upper_bounds(self.vmaxs)
# setup initial step, which can be either
# user-defined or default
if init_step is None:
stepsize = (np.array(self.vmaxs) - np.array(self.vmins)) / 4.
stepsize = stepsize.tolist()
else:
stepsize = init_step
self.fitter.set_initial_step(stepsize)
def set_lower_boundary(self, arr):
"""
Sets lower boundary.
:param arr:
:return:
"""
self.vmins = arr
def set_upper_boundary(self, arr):
"""
Sets upper boundary.
:param arr:
:return:
"""
self.vmaxs = arr
def set_fit_properties(self, pi):
"""
Sets identification of parameters i.e. names, groups and components
:param pi: dictionary with the records for each parameter
the order have to be the same as for the fitted parameter
:return:
"""
self.parameter_identification = pi
| 20,033 | 31.842623 | 114 | py |
pyterpol | pyterpol-master/fitting/interface.py | import copy
import corner
# import sys
import warnings
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from pyterpol.synthetic.makespectrum import SyntheticGrid
from pyterpol.observed.observations import ObservedSpectrum
from pyterpol.fitting.parameter import Parameter
from pyterpol.fitting.parameter import parameter_definitions
from pyterpol.fitting.fitter import Fitter
from pyterpol.synthetic.auxiliary import generate_least_number
from pyterpol.synthetic.auxiliary import keys_to_lowercase
from pyterpol.synthetic.auxiliary import read_text_file
from pyterpol.synthetic.auxiliary import string2bool
from pyterpol.synthetic.auxiliary import sum_dict_keys
from pyterpol.synthetic.auxiliary import ZERO_TOLERANCE
from pyterpol.plotting.plotting import *
# repeat userwarnings
warnings.simplefilter('always', UserWarning)
class Interface(object):
"""
"""
def __init__(self, sl=None, rl=None, ol=None, fitter=None, debug=False,
adaptive_resolution=True, spectrum_by_spectrum=None,
log_iterations=False):
"""
:param sl: StarList type
:param rl: RegionList type
:param ol: ObservedList type
:param fitter
:param debug
:param adaptive_resolution - this (sounds better than it actually is)
just means that resolution of the grid is set to twice
the resolution of the spectrum with highest resolution
:return:
"""
# StarList is deepcopied by value, because
# it is adjusted by the Interface
if sl is not None:
self.sl = sl.copy()
else:
self.sl = None
# RegionList and the ObservedList are copied
# by reference
self.rl = rl
self.ol = ol
self.synthetics = {}
self.grids = {}
self.fitter = fitter
self.spectrum_by_spectrum = spectrum_by_spectrum
# debug mode
self.debug = debug
# define empty comparison list
self.comparisonList = None
# parameters that cannot be obtained through interpolation
self._not_given_by_grid = ['lr', 'rv', 'vrot']
# relation between rv_groups and regions
self.rel_rvgroup_region = {}
# properties of synthetic spectra
self._synthetic_spectrum_kwargs = dict(step=0.01, order=4, padding=20.)
# properties of grids
self._grid_kwargs = dict(mode='default', debug=debug)
# initialization of various boolean variables
self.grid_properties_passed = False
self.fit_is_running = False
self.adaptive_resolution = adaptive_resolution
self.log_iterations = log_iterations
# temporary variable for info on the fitted parameters
self.ident_fitted_pars = None
self.one4all = False
def __str__(self):
"""
String representation of the class
:return:
"""
string = ""
for attr, name in zip(['sl', 'rl', 'ol', 'fitter'], ['StarList', 'RegionList', 'ObservedList', 'Fitter']):
string += '%s%s\n' % (name[:len(name)/2].rjust(50, '='), name[len(name)/2:].ljust(50, '='))
string += str(getattr(self, attr))
string += ''.ljust(100, '=')
return string
def accept_fit(self):
"""
Propagates the fitting result to the class.
:return:
"""
# this should be done more carefully
final_pars = self.fitter.result
print "FINAL PARAMETERS:", final_pars
# list fitted parameters
fitparams = self.get_fitted_parameters()
# updates the parameters with the result
for i in range(0, len(final_pars)):
fitparams[i]['value'] = final_pars[i]
# update the fitter with new initial parameters
self.fitter.par0 = copy.deepcopy(final_pars)
def add_comparison(self, region=None, parameters={}, observed=None, groups={}):
"""
:param region the name of the corresponding region
:param parameters a dictionary of the parameters required for the synthetic
spectrum
:param observed the observed spectrum
:param groups
Add a record to the comparisonList
:return: None
"""
if self.debug:
print 'Setting comparison for region: %s \n groups: %s. \n parameters: %s' % \
(str(region), str(groups), str(parameters))
if self.comparisonList is None:
raise Exception('The comparisonList has not been defined yet. Use Interface.ready_comparison for that.')
else:
# pass the regions
wmin = self.rl.mainList[region]['wmin']
wmax = self.rl.mainList[region]['wmax']
# try to read out the observed spectrum - everything
if observed is not None:
try:
ow, oi, oe = observed.get_spectrum(wmin, wmax)
except:
# if it does not work out..
ow = observed.get_spectrum(wmin, wmax)
oi = None
oe = None
self.comparisonList.append(dict(region=region,
parameters=parameters,
observed=observed,
groups=groups,
synthetic={x: None for x in parameters.keys()},
chi2=0.0,
wmin=wmin,
wmax=wmax,
wave=ow,
intens=oi,
error=oe
)
)
def clear_all(self):
"""
Clears the class.
:return:
"""
self.comparisonList = None
self.grids = {}
self.ol = None
self.rl = None
self.sl = None
self.fitter = None
self.synthetics = {}
self._grid_kwargs = {}
self._synthetic_spectrum_kwargs = {}
self.rel_rvgroup_region = {}
self.grid_properties_passed = False
self.ident_fitted_pars = None
def compute_chi2(self, pars=[], l=None, verbose=False):
"""
        :param pars: a list of values of the fitted parameters
        :param l: list of comparisons to be evaluated
        :param verbose: if True, a detailed chi^2 per comparison is returned as well
        :return: chi square
"""
if l is None:
l = self.comparisonList
# accounts for cases when we just evaluate current chi^2
if len(pars) == 0:
pars = self.get_fitted_parameters(attribute='value')
# propagate the parameters to the
# parameterlist and update it
self.propagate_and_update_parameters(l, pars)
# reads out the chi_2 from individual spectra
chi2 = self.read_chi2_from_comparisons(l, verbose)
# if we are fitting we store the info on the parameters
if self.fit_is_running & self.log_iterations:
self.fitter.append_iteration(dict(parameters=copy.deepcopy(pars), chi2=chi2))
else:
self.fitter.iter_number += 1
# print every hundredth iteration
if self.debug:
print 'Computed model: %s chi2: %s' % (str(pars), str(chi2))
else:
if (self.fitter.iter_number+1) % 100 == 0:
print 'Computed model: %s chi2: %s' % (str(pars), str(chi2))
return chi2
def compute_chi2_treshold(self, l=None, alpha=0.67):
"""
        Computes a chi^2 confidence-interval width from the chi^2 distribution.
        This is of course only approximate, since the model is never a perfect
        description of the data, but it gives a usable estimate.
        :param l: the list of comparisons
        :param alpha: the confidence level
:return:
"""
# use in-built comparison list of
# no other was passed
if l is None:
l = self.comparisonList
# get the degrees of freedom
ddof = self.get_degrees_of_freedom(l)
# estimate confidence limits
chi2 = stats.chi2(ddof)
vmin, vmax = chi2.interval(alpha)
        # now get the maximal value relative
        # to the minimal - the minimal value is
        # what we get with the minimization
# ratio = vmax/vmin
diff = vmax-vmin
# return ratio
return diff
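    # A minimal standalone sketch of the interval computation used above
    # (hedged example; assumes scipy is available and that `ddof` was obtained
    # from Interface.get_degrees_of_freedom):
    #
    #     from scipy import stats
    #     ddof = 1000                       # hypothetical number of degrees of freedom
    #     vmin, vmax = stats.chi2(ddof).interval(0.67)
    #     chi2_width = vmax - vmin          # width added on top of the minimal chi^2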
def change_observed_list(self, ol):
"""
        Removes the old observed list and adds a new one.
        It also resets the group assignment between
        regions and radial velocity groups. Each
        observed spectrum should have an rv group
        assigned, otherwise the outcome might be
        wrong.
:param ol:
:return:
"""
if self.ol is None:
warnings.warn('There was no ObservedList attached to the Interface. Correct?')
else:
self.ol.clear_all()
# attach new observed list
self.ol = ol
# reset the rv-group settings
self._setup_rv_groups()
def copy(self):
"""
Creates a copy of self.
:return:
"""
other = Interface()
for attr in ['ol', 'sl', 'rl', 'fitter', 'spectrum_by_spectrum',
'adaptive_resolution', 'debug', '_grid_kwargs',
'_synthetic_spectrum_kwargs']:
v = copy.deepcopy(getattr(self, attr))
setattr(other, attr, v)
return other
def choose_fitter(self, *args, **kwargs):
"""
        Just a wrapper for the Fitter.choose_fitter method;
        see the parameter description there.
:param args:
:param kwargs:
:return:
"""
# fitter is rather simple, so if there is none set, we set an empty
# one
if self.fitter is None:
self.fitter = Fitter(debug=self.debug)
# select fitted parameters
if 'fitparams' not in kwargs.keys():
fitparams = self.get_fitted_parameters()
kwargs['fitparams'] = fitparams
self.fitter.choose_fitter(*args, **kwargs)
def draw_random_sample(self):
"""
        Takes a random sample from the data (sampling with replacement). The
        sample contains the same number of observations as the original
        one -- i.e. some observations may repeat within the sample.
:return:
"""
# get number of observations
nobs = len(self.ol)
# take original spectra and groups
rv_groups = self.ol.observedSpectraList['group']['rv']
spectra = self.ol.observedSpectraList['spectrum']
# make random data sample
ind = np.sort(np.random.randint(nobs, size=nobs))
random_rv_groups = [rv_groups[i] for i in ind]
random_spectra = [spectra[i] for i in ind]
# reset group numbers
newobs = []
for i in range(0, len(random_spectra)):
newobs.append(dict(filename=random_spectra[i].filename,
error=random_spectra[i].global_error,
group=dict(rv=i),
hjd=random_spectra[i].hjd),
)
# create new list of observations
ol = ObservedList()
ol.add_observations(newobs)
# copy the starlist
sl_new = self.sl.copy()
for i, rndg in enumerate(random_rv_groups):
pars = self.sl.get_parameter(rv=rndg)
for c in self.sl.get_components():
sl_new.set_parameter(name='rv', component=c, group=i, value=pars[c][0].value)
# get regions
rl = self.rl
        # create new Interface
itf = Interface(sl=sl_new, rl=rl, ol=ol)
# set attributes
setattr(itf, 'grids', self.grids)
setattr(itf, 'synthetics', self.synthetics)
setattr(itf, '_grid_kwargs', self._grid_kwargs)
setattr(itf, '_synthetic_spectrum_kwargs', self._synthetic_spectrum_kwargs)
setattr(itf, 'fitter', self.fitter)
setattr(itf, 'adaptive_resolution', self.adaptive_resolution)
setattr(itf, 'debug', self.debug)
# finalize
itf._setup_rv_groups()
itf.ready_comparisons()
itf.populate_comparisons()
return itf
@staticmethod
def extract_parameters(l, attr='value'):
"""
        Converts a list of Parameter instances to a
        dictionary {name: attribute}.
        :param l: list of Parameters
        :param attr: the extracted attribute
        :return:
"""
params = {par['name']: par[attr] for par in l}
return params
@staticmethod
def evaluate_mcmc(f=None, treshold=100):
"""
        Returns best-fit values and errors estimated from the MCMC chain.
        :param f: mcmc log (chain file)
        :param treshold: number of iterations after which the chain is
               considered burnt in
        :return:
"""
# read the fitlog
log, nwalkers, niter, npars = read_mc_chain(f)
        # take only data where the mcmc has burnt in
log['data'] = log['data'][nwalkers*treshold:,:]
# best result
minind = np.argmin(-log['data'][:, -1])
# outputlist of errors
errors = {}
# fill the dictionary with errors
for i in range(0, len(log['component'])):
# parameter component, group
p = log['name'][i]
c = log['component'][i]
g = log['group'][i]
if c not in errors.keys():
errors[c] = {}
if p not in errors[c].keys():
errors[c][p] = []
# get the error estimate
best = log['data'][minind, i]
lower = log['data'][:, i].min() - best
upper = log['data'][:, i].max() - best
gauss_mean = log['data'][:, i].mean()
gauss_sigma = log['data'][:, i].std(ddof=1)
# append the value
errors[c][p].append(dict(best=best, group=g, gauss_mean=gauss_mean,
gauss_sigma=gauss_sigma, lower=lower, upper=upper))
return errors
def get_comparisons(self, verbose=False, **kwargs):
"""
        Narrows down the list of comparisons.
        :param verbose: also return indices into the original list
        :param kwargs: group names, properties or a region according to which
               the comparison list is narrowed down
        :return:
"""
# empty arrays for the output
clist = []
indices = []
# parameter keys
keys = kwargs.keys()
        # go over each record within the list of comparisons
for i in range(0, len(self.comparisonList)):
            # the keys that we test are somewhat heterogeneous,
            # so this construction is not pretty.
include = True
for key in keys:
# print key
                # what if the key lies among the groups
if key in self.comparisonList[i]['groups'].keys() \
and (kwargs[key] != self.comparisonList[i]['groups'][key]):
include = False
break
if hasattr(self.comparisonList[i]['observed'], key) and \
                        getattr(self.comparisonList[i]['observed'], key) != kwargs[key]:
include = False
break
if key == 'region' and self.comparisonList[i]['region'] != kwargs[key]:
include = False
break
# if it survived all tests it is included
if include:
clist.append(self.comparisonList[i])
indices.append(i)
# if we want to get indices of the found in the original array
if verbose:
return clist, indices
else:
return clist
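    # Hedged usage sketch -- narrowing down the comparisons (assumes `itf` is a
    # set-up Interface; the region name 'region00' and rv group 0 are illustrative):
    #
    #     sub_list = itf.get_comparisons(region='region00', rv=0)
    #     sub_list, idx = itf.get_comparisons(region='region00', rv=0, verbose=True)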
def get_defined_groups(self, component=None, parameter=None):
"""
Returns a dictionary of defined groups
:param component:
:param parameter:
:return:
"""
return self.sl.get_defined_groups(component=component, parameter=parameter)
def get_degrees_of_freedom(self, l=None):
"""
        Computes degrees of freedom for a given comparison list.
:param l:
:return: number of degrees of freedom
"""
if l is None:
l = self.comparisonList
# number of fitted parameters
m = len(self.get_fitted_parameters())
n = 0
# number of fitted spectra points
for rec in l:
for c in rec['synthetic'].keys():
n += len(rec['synthetic'][c])
return n-m
def get_fitted_parameters(self, attribute=None):
"""
        Lists all fitted Parameters or a list of one
        of their attributes.
        :param attribute: if given, only this attribute of each fitted Parameter is returned
        :return:
"""
# return the list of Parameters
if attribute is None:
return self.sl.get_fitted_parameters()
else:
return [par[attribute] for par in self.sl.get_fitted_parameters()]
def get_observed_spectra_number(self):
"""
        :return: the number of attached observed spectra
"""
if self.ol is not None:
return len(self.ol)
else:
return 0
def get_observed_spectrum(self, filename=None):
"""
        Returns an observed spectrum according to its name.
        :param filename: name of the queried spectrum
:return:
"""
return self.ol.get_spectra(filename=filename)[0]
def list_comparisons(self, l=None):
"""
This function displays all comparisons.
:param l list of comparisons
:return: string
"""
if l is None:
l = self.comparisonList
string = ''
for i, rec in enumerate(l):
string += "========================= Comparison %s =========================\n" % str(i).zfill(3)
reg = rec['region']
# list region
string += 'region: %s:(%s,%s)\n' % (reg, str(self.rl.mainList[reg]['wmin']),
str(self.rl.mainList[reg]['wmax']))
# list observed spectrum
if rec['observed'] is not None:
string += "observed: %s\n" % rec['observed'].filename
else:
string += "observed: NONE\n"
# lists all parameters
for c in rec['parameters'].keys():
string += 'component: %s ' % c
# print rec['parameters'][c]
for par in rec['parameters'][c]:
string += "%s: %s " % (par['name'], str(par['value']))
string += '\n'
# list all groups
string += 'groups: %s\n' % str(rec['groups'])
string += 'chi2: %s\n' % str(rec['chi2'])
string += "==================================================================\n"
return string
def list_fitters(self):
"""
Lists all available fitters.
:return:
"""
if self.fitter is not None:
return self.fitter.list_fitters()
else:
raise AttributeError('No fitter has been attached yet.')
@staticmethod
def load(f, one4all=False):
"""
        Loads the Interface from a file created with the save method.
        :param f: the loaded file
        :param one4all: use one grid for all regions (see set_one_for_all)
        :return:
"""
# first load the interface
# read the file
lines = read_text_file(f)
data_start = len(lines)
for i, l in enumerate(lines):
if l.find('INTERFACE') > -1:
data_start = i
break
        # check that there are actually some data in the file -
        # otherwise the algorithm failed to load the class
if data_start >= len(lines):
            warnings.warn('No interface was found.')
return False
# dictionary for the Interface attributes
ddicts = {}
for l in lines[1:]:
d = l.split()
            # once we reach the INTERFACE mark again, we end
if l.find('INTERFACE') > -1:
break
# define record names and types
dnames = dict(
grid_parameters=['mode'],
synthetic_spectra_parameters=['order', 'step', 'padding'],
env_keys=['debug', 'adaptive_resolution']
)
dtypes = dict(
grid_parameters=[str],
synthetic_spectra_parameters=[int, float, float],
env_keys=[string2bool, string2bool]
)
# load all keys - env_vars, grid and synthetic spectra parameters
for dname in dnames.keys():
if d[0].find(dname) > -1:
# print d[0]
p = dnames[dname]
pt = dtypes[dname]
ddict = {d[i].strip(':'): d[i+1] for i in range(1, len(d), 2)}
# cast the variables to correct type
for k in ddict.keys():
i = p.index(k)
ddict[k] = pt[i](ddict[k])
# print ddict
ddicts[dname] = ddict
# print ddicts
# load the remaining data
rl = RegionList()
# print rl.load(f)
if not rl.load(f):
raise ValueError('No records on the RegionList were found in %s.' % f)
sl = StarList()
if not sl.load(f):
raise ValueError('No records on the StarList were found in %s.' % f)
fitter = Fitter()
if not fitter.load(f):
warnings.warn('No fitter was found in file %s' % f)
fitter = None
ol = ObservedList()
if not ol.load(f):
warnings.warn('No ObservedList was found in file %s' % f)
ol = None
# print ddicts
# print fitter
# setup the interface
itf = Interface(sl=sl, ol=ol, rl=rl, fitter=fitter, **ddicts['env_keys'])
itf.set_one_for_all(one4all)
gpars = {}
# print ddicts
        # merge grid and synthetic spectra parameters
for d in [ddicts['synthetic_spectra_parameters'], ddicts['grid_parameters']]:
for k in d.keys():
gpars[k] = d[k]
itf.set_grid_properties(**gpars)
itf.setup()
itf.populate_comparisons()
# self.choose_fitter(self.fitter.fittername)
# if we got here, we loaded the data
return itf
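    # Hedged usage sketch -- restoring a session previously stored with
    # Interface.save() (the file name 'fit.sav' is only an example):
    #
    #     itf = Interface.load('fit.sav')
    #     print itf.compute_chi2()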
def populate_comparisons(self, l=None, demand_errors=False):
"""
        Creates a synthetic spectrum for every record in
        the comparisonList and, where uncertainties are available,
        evaluates the chi^2 of each comparison.
        :param l: list of comparisons
        :param demand_errors: raise an error if the observations carry no uncertainties
:return:
"""
if l is None:
l = self.comparisonList
        # go over each comparison in the list
for rec in l:
# get the region
region = rec['region']
# get the intensity and error
error = rec['error']
intens = rec['intens']
# go over each component
for c in rec['parameters'].keys():
pars = self.extract_parameters(rec['parameters'][c])
# use only those parameters that are not constrained with the grid
pars = {x: pars[x] for x in pars.keys() if x in self._not_given_by_grid}
# populate with the intensity vector of each component
if rec['observed'] is not None:
if demand_errors and rec['error'] is None:
raise ValueError('It is not allowed to call chi-square without having'
' uncertainties set.')
# extract the wavelength
wave = rec['wave']
# get the instrumental broadening
fwhm = rec['observed'].get_instrumental_width()
# define korelmode
korelmode = rec['observed'].korel
# generate the synthetic spectrum
rec['synthetic'][c] = self.synthetics[region][c].get_spectrum(wave=wave,
only_intensity=True,
korel=korelmode,
fwhm=fwhm,
**pars)
else:
wmin = rec['wmin']
wmax = rec['wmax']
error = None
korelmode = False
rec['synthetic'][c] = self.synthetics[region][c].get_spectrum(wmin=wmin,
wmax=wmax,
only_intensity=True,
korel=korelmode,
**pars)
# it is mandatory to provide errors for
# computation of the chi2
if error is not None:
# sum component spectra
for i, c in enumerate(rec['synthetic'].keys()):
if i == 0:
syn = rec['synthetic'][c].copy()
else:
syn = syn + rec['synthetic'][c]
# setup the chi2
rec['chi2'] = np.sum(((intens - syn) / error) ** 2)
def optimize_rv(self, fitter_name=None, groups=None, **fitter_kwargs):
"""
        Optimizes radial velocities spectrum by spectrum (i.e. rv group by rv group),
        keeping the remaining parameters fixed.
        :param fitter_name: name of the fitter to use (passed to choose_fitter)
        :param groups: rv groups to fit; if None, all defined rv groups are used
        :return:
"""
# turn off fitting of all parameters
for p in self.sl.get_parameter_types():
self.set_parameter(parname=p, fitted=False)
# if not defined, get rv groups
if groups is None:
groups = self.get_defined_groups(parameter='rv')
groups_list = []
for c in groups.keys():
groups_list.extend(groups[c]['rv'])
# rename back and make unique
groups = np.unique(groups_list)
# choose fitter
if fitter_name is not None:
self.choose_fitter(fitter_name, **fitter_kwargs)
# iterate over groups
for g in groups:
self.set_parameter(parname='rv', group=g, fitted=True)
l = self.get_comparisons(rv=g)
self.run_fit(l=l)
self.set_parameter(parname='rv', group=g, fitted=False)
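    # Hedged usage sketch -- the fitter name is an assumption (pick any name
    # reported by Fitter.list_fitters()):
    #
    #     itf.optimize_rv(fitter_name='nlopt_nelder_mead')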
def plot_all_comparisons(self, l=None, savefig=False, figname=None):
"""
Creates a plot of all setup comparisons.
:param l
:param savefig
:param figname
:return: None
"""
if figname is not None:
savefig = True
if l is None:
l = self.comparisonList
if len(l) == 0:
raise ValueError('The comparison list is empty. Did you run interface.setup() and interface.populate()?')
for i in range(0, len(l)):
self.plot_comparison_by_index(i, l=l, savefig=savefig, figname=figname)
def plot_comparison_by_index(self, index, l=None, savefig=False, figname=None):
"""
:param index
:param l
:param savefig
:param figname
:return:
"""
# the comparison
if l is None:
cpr = self.comparisonList[index]
else:
cpr = l[index]
# boundaries
reg = cpr['region']
wmin = self.rl.mainList[reg]['wmin']
wmax = self.rl.mainList[reg]['wmax']
# merge the spectra
if any([cpr['synthetic'][key] is None for key in cpr['synthetic'].keys()]):
raise ValueError('The synthetic spectra are not computed. Did you run Interface.populate_comparisons()?')
si = sum_dict_keys(cpr['synthetic'])
# names
if cpr['observed'] is not None:
obsname = cpr['observed'].filename.split('/')[-1]
else:
obsname = 'NONE'
synname = ''
for c in cpr['parameters']:
synname += 'Component: %s ' % c
pdict = self.extract_parameters(cpr['parameters'][c])
synname += str({k: "%.4f" % pdict[k] for k in pdict.keys()}) + '\n'
if cpr['observed'] is not None:
try:
w, oi, ei = cpr['observed'].get_spectrum(wmin, wmax)
except:
w, oi, = cpr['observed'].get_spectrum(wmin, wmax)
ei = np.zeros(len(w))
                warnings.warn('Your observed spectrum: %s has no errors attached!' % obsname)
else:
w = np.linspace(wmin, wmax, len(si))
if figname is None:
figname = "_".join([obsname, 'wmin', str(int(wmin)), 'wmax', str(int(wmax))]) + '.png'
else:
figname = "_".join([figname, obsname, 'wmin', str(int(wmin)), 'wmax', str(int(wmax))]) + '.png'
savefig = True
if self.debug:
print "Plotting comparison: observed: %s" % obsname
print "Plotting comparison: synthetics: %s" % synname
# do the plot
fig = plt.figure(figsize=(16, 10), dpi=100)
ax = fig.add_subplot(211)
if cpr['observed'] is not None:
ax.errorbar(w, oi, yerr=ei, fmt='-', color='k', label=obsname)
ax.plot(w, si, 'r-', label=synname)
ax.set_xlim(wmin, wmax)
ax.set_ylim(0.95*si.min(), 1.05*si.max())
ax.set_xlabel('$\lambda(\AA)$')
ax.set_ylabel('$F_{\lambda}$(rel.)')
ax.legend(fontsize=8, fancybox=True, shadow=True, bbox_to_anchor=(1.0, 1.2))
if cpr['observed'] is not None:
ax = fig.add_subplot(212)
resid = oi-si
ax.plot(w, resid, 'y', label='residuals')
ax.set_xlabel('$\lambda(\AA)$')
ax.set_ylabel('$F_{\lambda}$(rel.)')
ax.set_xlim(wmin, wmax)
ax.set_ylim(0.95*resid.min(), 1.05*resid.max())
ax.legend(fontsize=8, loc=3)
# save the figure
if savefig:
plt.savefig(figname)
plt.close(fig)
def plot_convergence(self, f=None, parameter='chi2', component='all', group='all', savefig=True, figname=None):
"""
Plots convergence of the chi2 and parameters.
:param f
:param parameter
:param component
:param group
:param savefig
:param figname
:return:
"""
if f is None:
f = self.fitter.fitlog
if figname is not None:
savefig = True
# read the log
log = read_fitlog(f)
block = []
labels = []
# set the plotted parameters
if parameter.lower() == 'all':
parameters = np.unique(log['name'])
else:
parameters = [parameter]
if component.lower() == 'all':
components = np.unique(log['component'])
else:
components = [component]
if group.lower() == 'all':
groups = np.unique(log['group'])
else:
groups = [group]
        # select those matching the choice
i = 0
for p, c, g in zip(log['name'], log['component'], log['group']):
if p not in parameters:
i += 1
continue
elif c not in components:
i += 1
continue
elif g not in groups:
i += 1
continue
else:
label = '_'.join(['p', p, 'c', c, 'g', str(g)])
labels.append(label)
block.append(log['data'][:, i])
i += 1
# append chi_square
if (parameter.lower() in ['chi2']) | (parameter == 'all'):
block.append(log['data'][:, -1])
labels.append('chi2')
# print labels
plot_convergence(np.column_stack(block), labels, figname=figname, savefig=savefig)
@staticmethod
def plot_convergence_mcmc(f='chain.dat', parameters='all', components='all', groups='all',
savefig=True, figname=None):
"""
        Plots convergence of an MCMC chain.
:param f:
:param parameters:
:param components:
:param groups:
:param savefig:
:param figname:
:return:
"""
# load data
log, nwalkers, niter, npars = read_mc_chain(f)
# set the plotted parameters
if parameters == 'all':
parameters = np.unique(log['name'])
if components == 'all':
components = np.unique(log['component'])
if groups == 'all':
groups = np.unique(log['group'])
if any([isinstance(x, (float, int, str)) for x in [components, parameters, groups]]):
raise TypeError('Parameters (parameter, component, group) have to be either type list'
' or string == \'all\'.')
        # empty arrays for indices and labels
indices = []
labels = []
i = 0
# fill the array of indices
for p, c, g in zip(log['name'], log['component'], log['group']):
# do only the desired ones
for v, vals in zip([p, c, g], [parameters, components, groups]):
# print v, vals
if v not in vals:
i += 1
break
indices.append(i)
labels.append('_'.join(['c', c, 'p', p, 'g', str(g)]))
i += 1
# do the plot
# print len(indices), len(labels)
plot_walkers(log['data'], niter, nwalkers, indices=indices,
labels=labels, savefig=savefig, figname=figname)
@staticmethod
def plot_covariances_mcmc(f='chain.dat', l=None, treshold=100, parameters=None,
components=None, groups=None, nbin=20, savefig=True, figname=None):
"""
Plots covariances between selected parameters
:param f
:param l
:param treshold
:param parameters
:param components
:param groups
:param nbin
:param savefig
:param figname
:return:
"""
if figname is not None:
savefig = True
        # reads the chain
log, nwalkers, niter, npars = read_mc_chain(f)
# set the plotted parameters
if parameters is None:
parameters = np.unique(log['name'])
if components is None:
components = np.unique(log['component'])
if groups is None:
groups = np.unique(log['group'])
if any([isinstance(x, (float, int, str)) for x in [components, parameters, groups]]):
raise TypeError('Parameters (parameter, component, group) have to be either type list'
' or string == \'all\'.')
# take only the part, where the sampler is burnt in
log['data'] = log['data'][nwalkers*treshold:,:]
# select those matching the choice
indices = []
labels = []
i = 0
# fill the array of indices
for p, c, g in zip(log['name'], log['component'], log['group']):
# do only the desired ones
saveind = True
for v, vals in zip([p, c, g], [parameters, components, groups]):
if v not in vals:
saveind = False
break
if saveind:
indices.append(i)
labels.append('_'.join(['c', c, 'p', p, 'g', str(g)]))
i += 1
# do the corner plot
corner.corner(log['data'][:,indices], bins=nbin, labels=labels,
quantiles=(0.67*np.ones(len(indices))).tolist(),
truths=(np.zeros(len(indices))).tolist()
)
# save the figure
if savefig:
if figname is None:
figname = 'correlations.png'
plt.savefig(figname)
@staticmethod
def plot_variances_mcmc(f=None, l=None, parameters=None, components=None, groups=None, nbin=20,
treshold=100, savefig=True, figname=None):
"""
Plots covariances between selected parameters
:param f
:param l
:param treshold
:param parameters
:param components
:param groups
:param nbin
:param savefig
        :param figname
:return:
"""
if any([isinstance(x, (float, int, str)) for x in [components, parameters, groups]]):
raise TypeError('Parameters (parameter, component, group) have to be either type list'
' or string == \'all\'.')
if figname is not None:
savefig = True
        # reads the chain
log, nwalkers, niter, npars = read_mc_chain(f)
# set the plotted parameters
if parameters is None:
parameters = np.unique(log['name'])
if components is None:
components = np.unique(log['component'])
if groups is None:
groups = np.unique(log['group'])
# take only the part, where the sampler is burnt in
log['data'] = log['data'][nwalkers*treshold:,:]
        # select those matching the choice
npar = len(log['name'])
for i in range(1, npar):
for j in range(0, i):
# extract individual values
p1 = log['name'][i]
c1 = log['component'][i]
g1 = log['group'][i]
# end if there are no components matching our
# choice of components, groups and parameters
if any([p.lower() not in parameters for p in [p1]]):
continue
if any([c.lower() not in components for c in [c1]]):
continue
if any([g not in groups for g in [g1]]):
continue
# setup labels
label1 = '_'.join(['p', p1, 'c', c1, 'g', str(g1).zfill(2)])
# setup plotted data
x = log['data'][:, i]
                # do the plot
plot_variance(x,nbin=nbin, label=label1, savefig=savefig, figname=figname)
def propagate_and_update_parameters(self, l, pars):
"""
        Propagates the new values of the fitted parameters to the StarList,
        recomputes the synthetic spectra where needed and re-populates the comparisons.
        :param l: list of comparisons
        :param pars: list of values of the fitted parameters
:return:
"""
# parameters are passed by reference, so
# this should also change the starlist
# and corresponding
fitpars = self.sl.get_fitted_parameters()
if len(pars) != len(fitpars):
raise ValueError('Length of the vector passed with the fitting environment does '
                             'not match length of the parameters marked as fitted.')
for i, v in enumerate(pars):
fitpars[i]['value'] = v
# we have to recompute the synthetic spectra
# if one grid parameter was passed
# first check for which parameters
# the grid parameters are fitted
components_to_update = []
for c in self.sl.fitted_types.keys():
for rec in self.sl.fitted_types[c]:
# recompute only those components for those
# grid parameter is fitted
if rec not in self._not_given_by_grid:
components_to_update.append(c)
# update the synthetic spectra
if len(components_to_update) > 0:
self.ready_synthetic_spectra(complist=components_to_update)
# populate the comparison
self.populate_comparisons(l=l, demand_errors=True)
def ready_synthetic_spectra(self, complist=[]):
"""
Readies the synthetic spectra for each region.
        :param complist: list of components that will be re-computed.
:return:
"""
# if there is no list of components
# for which to set the synthetic
# parameters
if len(complist) == 0:
complist = self.sl.get_components()
# regime in which we use one long spectrum
if self.one4all:
wl = self.rl.get_wavelengths()
wmin = np.min(wl)
wmax = np.max(wl)
for reg in self.rl._registered_regions:
# add the region to synthetics
if reg not in self.synthetics.keys():
self.synthetics[reg] = dict()
# wavelength_boundaries
if not self.one4all:
wmin = self.rl.mainList[reg]['wmin']
wmax = self.rl.mainList[reg]['wmax']
# get all parameters for a given region
reg_groups = self.rl.mainList[reg]['groups'][0]
reg_groups = {x: reg_groups[x] for x in reg_groups.keys()
if x not in self._not_given_by_grid}
grid_pars = [x for x in self.sl.get_physical_parameters()
if x not in self._not_given_by_grid]
# setup default groups - ie zero
for par in grid_pars:
if par not in reg_groups.keys():
reg_groups[par] = 0
# get list of Parameters
parlist = self.sl.get_parameter(**reg_groups)
for c in complist:
# convert Parameter list to dictionary
params = self.extract_parameters(parlist[c])
# print params
# padding has to be relatively large, since
# we do not know what the rvs will be
if self.debug:
print "Creating SyntheticSpectrum: params: %s wmin: %s wmax: %s" % (str(params),
str(wmin),
str(wmax))
if not self.one4all:
self.synthetics[reg][c] = self.grids[reg].get_synthetic_spectrum(params,
np.array([wmin, wmax]),
**self._synthetic_spectrum_kwargs)
else:
self.synthetics[reg][c] = self.grids['all'].get_synthetic_spectrum(params,
np.array([wmin, wmax]),
**self._synthetic_spectrum_kwargs)
def read_chi2_from_comparisons(self, l=None, verbose=False):
"""
Reads the chi-squares from the list.
:param l:
:return:
"""
        # work with the main comparisonList if no other
# is provided
if l is None:
l = self.comparisonList
chi2 = 0.0
if verbose:
chi2_detailed = []
# read out the chi squares
for i in range(0, len(l)):
chi2 += l[i]['chi2']
# if verbosity is desired a detailed chi-square
# info on each region is returned
if verbose:
chi2_detailed.append(dict(chi2=l[i]['chi2'],
region=self.rl.mainList[l[i]['region']],
rv_group=l[i]['groups']['rv']))
if verbose:
return chi2, chi2_detailed
else:
return chi2
def ready_comparisons(self):
"""
        This function creates the comparisonList, which is one of the
        cornerstones of the class. It is a list of all
        combinations of the parameters.
:return:
"""
# start a list of comparisons that will
# be carried out with the given dataset
self.comparisonList = []
# go region by region
for reg in self.rl.mainList.keys():
# fitted region
wmin = self.rl.mainList[reg]['wmin']
wmax = self.rl.mainList[reg]['wmax']
            # region-defined groups and parameters
reg_groups = copy.deepcopy(self.rl.mainList[reg]['groups'][0])
phys_pars = [x for x in self.sl.get_physical_parameters() if x not in ['rv']]
# print reg, phys_pars, reg_groups
# if the group is not defined, it is zero
for par in phys_pars:
if par not in reg_groups.keys():
reg_groups[par] = 0
# create a list of unique rv groups
rv_groups = self.sl.get_defined_groups(parameter='rv')
rv_groups = [rv_groups[key]['rv'] for key in rv_groups.keys()]
temp = []
for row in rv_groups:
temp.extend(row)
rv_groups = np.unique(temp)
for rv_group in rv_groups:
# append rv_group to groups
all_groups = copy.deepcopy(reg_groups)
all_groups['rv'] = rv_group
# append rv parameter to the remaining parameters
# rv_pars = self.sl.get_parameter(rv=rv_group)
# get unique set of parameters for a given group
all_pars = self.sl.get_parameter(**all_groups)
# for c in rv_pars.keys():
# all_pars[c].extend(rv_pars[c])
if self.ol is not None:
if rv_group not in self.rel_rvgroup_region[reg]:
continue
# the wmin wmax is used to check again that
# we are in the correct region.
if self.debug:
print "Queried parameters in ready comparisons:", wmin, wmax, rv_group
obs = self.ol.get_spectra(wmin=wmin, wmax=wmax, rv=rv_group)
if len(obs) == 0:
continue
else:
obs = [None]
# add the comparison for each observed spectrum
# because in an unlikely event, when we fit the
# same RVs for several spectra
for o in obs:
# What if we are only generating spectra???
# If there are spectra attached we are
                    # comparing and that's it!!
if o is None:
c = 'all'
else:
c = o.component
if c != 'all':
temp_all_pars = {c: all_pars[c]}
else:
temp_all_pars = all_pars
self.add_comparison(region=reg,
parameters=temp_all_pars,
groups=all_groups,
observed=o,
)
def ready_comparisons_spectrum_by_spectrum(self):
"""
        This function creates the comparisonList, which is one of the
        cornerstones of the class. It is a list of all
        combinations of the parameters.
:return:
"""
# print self
# start a list of comparisons that will
# be carried out with the given dataset
self.comparisonList = []
# go region by region
for reg in self.rl.mainList.keys():
# fitted region
wmin = self.rl.mainList[reg]['wmin']
wmax = self.rl.mainList[reg]['wmax']
# generate a dictionary of unique groups for each parameter
unique_groups = {}
# phys_pars = [par for par in self.sl.get_physical_parameters() if par not in ['lr']]
phys_pars = self.sl.get_physical_parameters()
for par in phys_pars:
groups = self.sl.get_defined_groups(parameter=par)
temp = []
for c in groups.keys():
print groups[c][par]
temp.extend(groups[c][par])
unique_groups[par] = np.unique(temp).tolist()
# print unique_groups
# position in the row of each parameter
position = {key: 0 for key in unique_groups.keys()}
keys = unique_groups.keys()
# print position
# print unique_groups
            # THIS IS PROBABLY THE MOST IDIOTIC WAY TO GET
# ALL COMBINATIONS BETWEEN RECORDS IN N DIFFERENT LISTS
# SURPRISINGLY IT DOES NOT GENERATE REDUNDANT COMPARISONS
# It iterates over the positions list until for each
# record in the list position[i] == len(unique_groups[i])
# both are dictionaries of course
i = 0
all_groups_list = []
# while position[keys[-1]] >= len(unique_groups[keys[-1]])-1:
while True:
# append the current groups
temp = {key: unique_groups[key][position[key]] for key in keys}
all_groups_list.append(temp)
                # search until you find a list of length > 1 or till the end
while i < len(keys) and (position[keys[i]] == len(unique_groups[keys[i]])-1):
i += 1
# if end was reached - end
if not i < len(keys):
break
else:
# else increment the record and start over
position[keys[i]] += 1
for j in range(0, i):
position[keys[j]] = 0
i = 0
# for rec in all_groups_list:
# print rec
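            # Note: an equivalent, more compact way to build all_groups_list would be
            # itertools.product (hedged sketch, not the author's original approach):
            #
            #     import itertools
            #     all_groups_list = [dict(zip(keys, combo)) for combo in
            #                        itertools.product(*[unique_groups[k] for k in keys])]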
for rec in all_groups_list:
# get unique set of parameters for a given group
all_pars = self.sl.get_parameter(**rec)
if self.ol is not None:
# if rv_group not in self.rel_rvgroup_region[reg]:
# continue
# the wmin wmax is used to check again that
# we are in the correct region.
obs = self.ol.get_spectra(wmin=wmin, wmax=wmax, permissive=True, **rec)
if len(obs) == 0:
continue
else:
obs = [None]
# add the comparison for each observed spectrum
# because in an unlikely event, when we fit the
# same RVs for several spectra
for o in obs:
# What if we are only generating spectra???
# If there are spectra attached we are
                    # comparing and that's it!!
if o is None:
c = 'all'
else:
c = o.component
if c != 'all':
temp_all_pars = {c: all_pars[c]}
else:
temp_all_pars = all_pars
self.add_comparison(region=reg,
parameters=temp_all_pars,
groups=rec,
observed=o,
)
def remove_parameter(self, component, parameter, group):
"""
        :param component: component for which the parameter is deleted
        :param parameter: the deleted parameter
        :param group: the group of the deleted parameter
:return:
"""
self.sl.remove_parameter(component, parameter, group)
def run_fit(self, l=None, verbose=False):
"""
Starts the fitting
:param l:
:param verbose:
:return:
"""
# update fitted parameters
self.update_fitter()
# set the identification of fitted parameters
self.fitter.set_fit_properties(self.sl.get_fitted_parameters(True)[1])
# this starts recording of each iteration chi2
self.fit_is_running = True
# runs the fitting
self.fitter(self.compute_chi2, l, verbose)
# copy the fit into the whole structure
self.accept_fit()
# writes the remaining iterations within the file
self.fitter.flush_iters()
        # turn off the fitting
self.fit_is_running = False
def run_bootstrap(self, limits, outputname=None, decouple_rv=True, niter=100, sub_niter=3):
"""
Runs bootstrap simulation to estimate the errors. The initial parameter set is chosen
randomly in the vicinity of the solution that is stored within the Interface type.
:param limits: format dict(component1=dict(rv=[low, high], teff=[low, high],..),
component2=dict(..), ..), where the range in which the random number is
(stored_value - low, stored_value + high).
:param outputname: Prefix name for result of each bootstrap iteration.
:param decouple_rv: Should the rvs be fitted separately from the remaining parameters?
        :param niter: Number of bootstrap iterations.
        :param sub_niter: Number of subiterations, in which rv is fitted first and then the
               remaining parameters. This parameter is irrelevant for decouple_rv = False.
:return:
"""
# set outputname of each iteration
if outputname is None:
outputname = 'bootstrap'
# niter samples are computed
for i in range(niter):
# create an interface with a random data sample
itf = self.draw_random_sample()
# set a random starting point within limits
for c in limits.keys():
for p in limits[c].keys():
# user supplied limits
bs_vmin = limits[c][p][0]
bs_vmax = limits[c][p][1]
# get all defined groups
groups = itf.get_defined_groups(component=c, parameter=p)[c][p]
# for each group set random starting point
for g in groups:
# for each group, parameter and component
# get value, minimal and maximal
par = itf.sl.get_parameter(**{p : g})[c][0]
value = par.value
vmin = par.vmin
vmax = par.vmax
# set boundaries where random number is drawn
llim = max([value - bs_vmin, vmin])
ulim = min([value + bs_vmax, vmax])
# draw the random number
rn = llim + (ulim - llim) * np.random.random()
# set it to parameter
par.value = rn
par.vmin = max([vmin, value - 2 * bs_vmin])
par.vmax = min([vmax, value + 2 * bs_vmax])
# set outputname for one fit
outputname_one_iter = '.'.join([outputname, str(i).zfill(3), 'sav'])
# get list of fitted parameters
fitpars = {}
for c in itf.sl.componentList.keys():
fitpars[c] = []
for p in itf.sl.componentList[c].keys():
for k in range(0, len(itf.sl.componentList[c][p])):
if itf.sl.componentList[c][p][k].fitted:
fitpars[c].append(p)
break
#sys.exit(0)
            # now proceed with the fitting
itf.save('.'.join([outputname, 'initial', str(i).zfill(3), 'sav']))
if decouple_rv:
# do several iterations, fitting rv and remaining parameters
for j in range(sub_niter):
# turn off fitting of radial velocity
itf.set_parameter(parname='rv', fitted=False)
# turn on remaining parameters
for c in fitpars.keys():
for p in fitpars[c]:
itf.set_parameter(parname=p, component=c, fitted=True)
# run the fit - not radial velocities
itf.run_fit()
#print itf
#print itf.list_comparisons()
# itf.save('.'.join(['before_rv', str(i).zfill(3), str(j).zfill(2), 'sav']))
# run the fit - radial velocities
itf.optimize_rv()
#print itf
#print itf.list_comparisons()
# itf.save('.'.join(['after_rv', str(i).zfill(3), str(j).zfill(2), 'sav']))
else:
itf.run_fit()
# save the result
itf.save(outputname_one_iter)
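    # Hedged usage sketch of the bootstrap -- the component name 'primary' and the
    # limits are purely illustrative:
    #
    #     limits = dict(primary=dict(rv=[10., 10.], teff=[500., 500.]))
    #     itf.run_bootstrap(limits, outputname='bootstrap', niter=100, sub_niter=3)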
def run_mcmc(self, chain_file='chain.dat', nwalkers=None, niter=500, l=None, verbose=False):
"""
        Runs the MCMC error estimation.
        :param chain_file: file into which the chain is stored
        :param nwalkers: number of walkers; defaults to 4x the number of fitted parameters
        :param niter: number of iterations
        :param l: list of comparisons
        :param verbose:
        :return:
"""
# pass on the fit properties
self.fitter.set_fit_properties(self.sl.get_fitted_parameters(True)[1])
# update the boundaries
vmins = self.get_fitted_parameters(attribute='vmin')
vmaxs = self.get_fitted_parameters(attribute='vmax')
self.fitter.set_lower_boundary(vmins)
self.fitter.set_upper_boundary(vmaxs)
# get the values
vals = self.get_fitted_parameters(attribute='value')
# set up number of walkers
if nwalkers is None:
nwalkers = 4*len(vals)
# run the mcmc sampling
self.fitter.run_mcmc(self.compute_chi2, chain_file, vals, nwalkers, niter, l, verbose)
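    # Hedged usage sketch -- a fitter must have been chosen beforehand (via
    # choose_fitter); the walker/iteration counts are illustrative only:
    #
    #     itf.run_mcmc(chain_file='chain.dat', nwalkers=40, niter=500)
    #     errors = Interface.evaluate_mcmc('chain.dat', treshold=100)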
def save(self, ofile):
"""
Saves the interface as a text file.
:param ofile: file or filehandler
:return:
"""
# open the file
if isinstance(ofile, str):
ofile = open(ofile, 'w')
# Setup the interface variables first.
string = ' INTERFACE '.rjust(105, '#').ljust(200, '#') + '\n'
        # set the grid properties
string += 'grid_parameters: '
for key in self._grid_kwargs.keys():
if key not in ['debug']:
string += '%s: %s ' % (key, str(self._grid_kwargs[key]))
string += '\n'
# set the synthetic spectra parameters
string += 'synthetic_spectra_parameters: '
for key in self._synthetic_spectrum_kwargs.keys():
string += '%s: %s ' % (key, str(self._synthetic_spectrum_kwargs[key]))
string += '\n'
# Set the environmental keys
enviromental_keys = ['adaptive_resolution', 'debug']
string += 'env_keys: '
for ekey in enviromental_keys:
string += "%s: %s " % (ekey, str(getattr(self, ekey)))
string += '\n'
# finalize the string
string += ' INTERFACE '.rjust(105, '#').ljust(200, '#') + '\n'
ofile.writelines(string)
# save the starlist
self.sl.save(ofile)
# save the fitter
self.fitter.save(ofile)
# save the regions
self.rl.save(ofile)
# save the observed list - if any was given
# and compute the chi-square
if self.ol is not None:
# saves the observed list
self.ol.save(ofile)
# saves the chi-square and degrees of freedom
string = ' CHI-SQUARE '.rjust(105, '#').ljust(200, '#') + '\n'
# compute chi2 and ddof
chi2 = self.compute_chi2()
ddof = self.get_degrees_of_freedom()
# save it within the asc file
string += 'Chi^2: %s Degrees_Of_Freedom: %s Reduced Chi^2: %s\n' % \
(str(chi2), str(ddof), str(chi2 / ddof))
string += ' CHI-SQUARE '.rjust(105, '#').ljust(200, '#') + '\n'
ofile.writelines(string)
def setup(self):
"""
This function probes the observed and
region list and propagates group definitions
from them to the starlist.
:return:
"""
# first setup region groups
if self.rl is not None:
region_groups = self.rl.get_region_groups()
self.sl.set_groups(region_groups)
else:
self.rl = RegionList(debug=self.debug)
self.rl.get_regions_from_obs(copy.deepcopy(self.ol.observedSpectraList['spectrum']))
# TODO setting up the region <-> rv relation better - this is a quick fix
# TODO and unlikely a robust one
self.rel_rvgroup_region = {reg: [0] for reg in self.rl.get_registered_regions()}
region_groups = self.rl.get_region_groups()
self.sl.set_groups(region_groups)
# print self
# setup radial velocity groups
if self.ol is not None:
            # we will fit some parameters separately for some spectra,
            # therefore all groups are assigned from the data, not only
            # the radial velocities
# check that all fitted spectra fit within at least one
# spectral region
self.verify_spectra_and_regions()
if self.spectrum_by_spectrum is not None:
# setup groups for each spectrum
# relative luminosity is given by spectra region, not the spectrum itself
phys_pars = [par for par in self.sl.get_physical_parameters() if par not in 'lr']
# parameters that will be owned by each spectrum
varparams = self.spectrum_by_spectrum
# common parameters
fixparams = [par for par in phys_pars if par not in self.spectrum_by_spectrum]
self._set_groups_to_observed(varparams, fixparams)
self._setup_all_groups()
else:
# print self
self._setup_rv_groups()
# print self
# setup the wavelength step of synthetic spectra
            # from the observed spectra
if self.adaptive_resolution:
step = self.ol.get_resolution()
if self.debug:
print "The step size of the grid is: %s Angstrom." % str(step/2.)
self.set_grid_properties(step=step/2.)
else:
warnings.warn('There are no data attached, so all regions are set to '
'have the same radial velocity. Each component can have'
                          ' different velocity of course.')
# attach grids to the interface
self._setup_grids()
# create the basic interpolated spectra
self.ready_synthetic_spectra()
# setup all comparisons
if self.spectrum_by_spectrum is not None:
self.ready_comparisons_spectrum_by_spectrum()
else:
self.ready_comparisons()
# setup fitter
if self.fitter is None:
self.fitter = Fitter(debug=self.debug)
# at the end the comparisons synthetic spectra are populated
self.populate_comparisons()
def set_grid_properties(self, **kwargs):
"""
        :param kwargs: step - wavelength step of the synthetic spectra
        :param kwargs: padding - wavelength padding (in Angstrom) used for
               the synthetic spectra
        :param kwargs: order - maximal number of spectra
               used for interpolation
:return:
"""
# if we pass step, we turn off
# adaptive resolution
if 'step' in kwargs.keys():
self.adaptive_resolution = False
for k in kwargs.keys():
# setup grid parameters
if k in self._grid_kwargs.keys():
self._grid_kwargs[k] = kwargs[k]
# setup synthetic spectra parameters
elif k in self._synthetic_spectrum_kwargs.keys():
self._synthetic_spectrum_kwargs[k] = kwargs[k]
else:
raise KeyError('Key: %s is not a property of either the grid or synthetic spectra. '
'The only parameters adjustable with this function are: '
                           ' %s for grid and %s for synthetic spectra.'
% (k,
str(self._grid_kwargs.keys()),
                              str(self._synthetic_spectrum_kwargs.keys())))
def _set_groups_to_observed(self, varparams, fixparams):
"""
:param varparams parameters whose group number should vary from spectrum to spectrum
:param fixparams parameters whose group should be the same for all spectra
:return:
"""
if self.ol is None:
raise AttributeError('No data are attached.')
else:
for i in range(0, len(self.ol)):
# setup varying parameters
for vpar in varparams:
if vpar not in self.ol.observedSpectraList['group'].keys():
self.ol.observedSpectraList['group'][vpar] = np.zeros(len(self.ol))
self.ol.observedSpectraList['group'][vpar][i] = i+1
# setup fixed parameters
for fpar in fixparams:
if fpar not in self.ol.observedSpectraList['group'].keys():
self.ol.observedSpectraList['group'][fpar] = np.zeros(len(self.ol))
self.ol.observedSpectraList['group'][fpar][i] = 0
# set the groups from table to spectra
self.ol._set_groups_to_spectra()
def set_one_for_all(self, switch):
"""
        Sets usage of one grid for all regions. This is faster
        when there are not many empty gaps between the fitted
        regions. It reduces the number of spectra that have to be loaded,
        but increases the duration of the interpolation.
        :param switch: turn on/off the one-for-all regime
:return:
"""
if not isinstance(switch, (bool, int)):
raise TypeError('Switch of the one4all regime must have type bool.')
self.one4all = switch
self._setup_grids()
def set_parameter(self, component='all', parname=None, group='all', **kwargs):
"""
:param component:
:param parname
:param group:
:param kwargs: keywords to be set up for each parameter
:return:
"""
# check the results
if parname is None:
print "I cannot adjust parameter: %s." % str(parname)
if len(kwargs.keys()) == 0:
return
# setup the components
if component == 'all':
component = self.sl.get_components()
else:
component = [component]
# create a list of unique groups if all are needed
        if group == 'all':
groups = []
dict_groups = self.sl.get_defined_groups(parameter=parname)
for c in dict_groups.keys():
groups.extend(dict_groups[c][parname])
groups = np.unique(groups)
else:
groups = [group]
# propagate to the star
for c in component:
for g in groups:
# print c, g, kwargs
self.sl.set_parameter(parname, c, g, **kwargs)
# print self
# recompute synthetic spectra
if (parname not in self._not_given_by_grid) & ('value' in kwargs.keys()):
self.ready_synthetic_spectra()
# update the fitter if number of fitted
# parameters changes
if 'fitted' in kwargs.keys() and self.fitter.fittername is not None:
fitparams = self.get_fitted_parameters()
self.choose_fitter(name=self.fitter.fittername, fitparams=fitparams, **self.fitter.fit_kwargs)
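    # Hedged usage sketch -- the component name 'primary' and the values are
    # purely illustrative:
    #
    #     itf.set_parameter(component='primary', parname='teff', value=16000.,
    #                       fitted=True, vmin=14000., vmax=18000.)
    #     itf.set_parameter(parname='rv', group=1, fitted=True)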
def set_error(self, parname='rv', component=None, error=1.0):
"""
        Sets an error estimate by adjusting vmin, vmax of a parameter.
        :param parname: name of the parameter
        :param component: the component; if None, all registered components are adjusted
        :param error: the error which will be used to set the boundaries
        :return:
"""
if component is not None:
components = [component]
else:
components = self.sl._registered_components
# get all fitted parameters
parname = parname.lower()
for c in components:
if parname in self.sl.componentList[c].keys():
for p in self.sl.componentList[c][parname]:
v = p['value']
# relative luminosity needs special treatment
if p['name'] == 'lr':
p['vmin'] = max([0.0, v - error])
p['vmax'] = min([1.0, v + error])
# and so does also the rotational velocity
elif p['name'] == 'vrot':
p['vmin'] = max([0.0, v - error])
p['vmax'] = v + error
# and the rest is simple
else:
p['vmin'] = v - error
p['vmax'] = v + error
def _setup_grids(self):
"""
Initializes grid of synthetic spectra for each region -
i.e. there is no point in calling the function without
having the regions set up.
        The grid options are taken from self._grid_kwargs.
:return:
"""
if not self.one4all:
for reg in self.rl.mainList.keys():
self.grids[reg] = SyntheticGrid(**self._grid_kwargs)
else:
# assume that there is only one grid for all
self.grids['all'] = SyntheticGrid(**self._grid_kwargs)
def _setup_rv_groups(self):
"""
Setting up the rv_groups is a pain..
:return:
"""
# TODO Can this be done better?????
# empty array for components where cloning
# was performed - to get rid of the first
# group
cloned_comps = []
registered_groups = []
        # dictionary for newly registered groups;
        # the newly registered groups have to
        # be assigned back to the spectra,
        # otherwise we would not know which rv
        # belongs to which spectrum
new_groups = dict()
# get wavelength boundaries of defined regions
wmins, wmaxs, regs = self.rl.get_wavelengths(verbose=True)
# this dictionary is needed to have
# unambiguous relationship between
# rv_group, spectrum and region
reg2rv = {x: [] for x in regs}
        # for every region we have a look if we have some data
for wmin, wmax, reg in zip(wmins, wmaxs, regs):
# query spectra for each region
observed_spectra = self.ol.get_spectra(wmin=wmin, wmax=wmax)
for i, spectrum in enumerate(observed_spectra):
# read out properties of spectra
component = spectrum.component
# there can be more spectral groups
rv_groups = spectrum.group['rv']
if not isinstance(rv_groups, (list, tuple)):
rv_groups = [rv_groups]
for rv_group in rv_groups:
# readout groups that were already defined for all components
def_groups = self.sl.get_defined_groups(component='all', parameter='rv')['all']['rv']
# We define group for our observation
if rv_group is None:
gn = generate_least_number(def_groups)
reg2rv[reg].append(gn)
# save the newly registered group
if spectrum.filename not in new_groups.keys():
new_groups[spectrum.filename] = []
new_groups[spectrum.filename].append(gn)
elif rv_group not in def_groups:
gn = rv_group
reg2rv[reg].append(rv_group)
                    # if the group is defined we only need to
                    # add it among the user defined ones,
                    # so it is not deleted later
elif rv_group in def_groups:
registered_groups.append(rv_group)
reg2rv[reg].append(rv_group)
continue
                    # attaches new parameter to the StarList
# print component, gn
self.sl.clone_parameter(component, 'rv', group=gn)
if component not in cloned_comps:
if component == 'all':
cloned_comps.extend(self.sl.get_components())
else:
cloned_comps.append(component)
registered_groups.append(gn)
# print registered_groups, cloned_comps
# remove the default groups
for c in cloned_comps:
gref = self.sl.componentList[c]['rv'][0]['group']
if gref not in registered_groups:
self.remove_parameter(c, 'rv', gref)
# back register the group numbers to the observed spectra
for filename in new_groups.keys():
# print new_groups[filename]
self.ol.set_spectrum(filename=filename, group={'rv': new_groups[filename]})
# print self
# finalize the list of rv_groups for each region
self.rel_rvgroup_region = {x: np.unique(reg2rv[x]).tolist() for x in reg2rv.keys()}
def _setup_all_groups(self):
"""
Setting up all groups from observations is even a bigger pain.
:return:
"""
# get wavelength boundaries of defined regions
wmins, wmaxs, regs = self.rl.get_wavelengths(verbose=True)
# this dictionary is needed to have
# unambiguous relationship between
# rv_group, spectrum and region
reg2rv = {x: [] for x in regs}
# physical parameters
phys_pars = self.sl.get_physical_parameters()
phys_pars = [par for par in phys_pars if par not in ['lr']]
        # for every region we have a look if we have some data
for p_par in phys_pars:
new_groups = dict()
cloned_comps = []
registered_groups = []
for wmin, wmax, reg in zip(wmins, wmaxs, regs):
# query spectra for each region
observed_spectra = self.ol.get_spectra(wmin=wmin, wmax=wmax)
# go over each observed spectrum
for i, spectrum in enumerate(observed_spectra):
# read out properties of spectra
component = spectrum.component
                    # if the group is not defined for the spectrum
if p_par in spectrum.group.keys():
p_group = copy.deepcopy(spectrum.group[p_par])
else:
# self.ol.set_spectrum(spectrum.filename, group={p_par:0})
p_group = None
# readout groups that were already defined for all components
def_groups = self.sl.get_defined_groups(component='all', parameter=p_par)['all'][p_par]
# print p_par, def_groups
# We define group for our observation
if p_group is None:
if p_par == 'rv':
gn = generate_least_number(def_groups)
reg2rv[reg].append(gn)
# for other than rvs, the default group is 0
else:
# self.ol.set_spectrum(filename=spectrum.filename, group={p_par: 0})
# spectrum.group[p_par]=0
continue
# save the newly registered group
if spectrum.filename not in new_groups.keys():
new_groups[spectrum.filename] = []
new_groups[spectrum.filename].append(gn)
elif p_group not in def_groups:
gn = p_group
reg2rv[reg].append(p_group)
                    # if the group is defined we only need to
                    # add it among the user defined ones,
                    # so it is not deleted later
elif p_group in def_groups:
registered_groups.append(p_group)
reg2rv[reg].append(p_group)
continue
                    # attaches new parameter to the StarList
# print component, gn
self.sl.clone_parameter(component, p_par, group=gn)
if component not in cloned_comps:
if component == 'all':
cloned_comps.extend(self.sl.get_components())
else:
cloned_comps.append(component)
registered_groups.append(gn)
# print registered_groups, cloned_comps
# remove the default groups
for c in cloned_comps:
gref = self.sl.componentList[c][p_par][0]['group']
if gref not in registered_groups:
self.remove_parameter(c, p_par, gref)
# print new_groups
# back register the group numbers to the observed spectra
for filename in new_groups.keys():
# print p_par, new_groups
self.ol.set_spectrum(filename=filename, group={'rv': new_groups[filename]})
# finalize the list of rv_groups for each region
self.rel_rvgroup_region = {x: np.unique(reg2rv[x]).tolist() for x in reg2rv.keys()}
def update_fitter(self):
"""
Pass the fitted parameters to the fitter.
:return:
"""
# get the fitted parameters
fitpars = self.get_fitted_parameters()
name = self.fitter.fittername
kwargs = self.fitter.fit_kwargs
# update the fitted parameters
self.choose_fitter(name, fitparams=fitpars, **kwargs)
def verify_spectra_and_regions(self):
"""
        Checks that all fitted spectra fit into at least one region.
        If not, a warning is issued and the spectrum is excluded from fitting.
:return:
"""
# get all defined regions
wmins, wmaxs = self.rl.get_wavelengths()
# go over each spectrum
for spectrum in self.ol.observedSpectraList['spectrum']:
wave = spectrum.get_wavelength()
owmin = wave.min()
owmax = wave.max()
# check whether the spectrum fits into at least one
# region
is_within = False
for wmin, wmax in zip(wmins, wmaxs):
if (wmin > owmin) & (wmax < owmax):
is_within = True
break
if not is_within:
warnings.warn('The spectrum:\n%s does not fit into any defined spectral region. These '
'spectra will be excluded from fitting.' % str(spectrum))
@staticmethod
def write_mc_result(f, treshold=100, outputname='fit.res'):
"""
Writes the result of fitting
:param f a fitting log
:param outputname
:param treshold
:return:
"""
# returns a dictionary of fitted parameters and their uncertainties
pars = Interface.evaluate_mcmc(f, treshold=treshold)
# creates the output string
string = ''
for c in pars.keys():
for p in pars[c].keys():
for row in pars[c][p]:
string += 'c:%15s p:%6s ' % (c, p)
string += 'g:%3i ' % (row['group'])
for key in ['best', 'gauss_mean', 'gauss_sigma', 'lower', 'upper']:
string += "%6s: %10.4f " % (key, row[key])
string += '\n'
# writes it to a file
ofile = open(outputname, 'w')
ofile.writelines([string])
ofile.close()
def write_rvs(self, outputname=None):
"""
Writes RVs defined to all groups --- usually there
is only one spectrum per group.
:param outputname: file where the output is written
:return: rvs -- radial velocities per component and group,
allgroup -- a list of all defined rv groups
names --list of spectra names
"""
# get define groups
groups = self.get_defined_groups(component='all', parameter='rv')
# get a parameter
components = self.sl._registered_components
# get a list of unique groups
allgroups = []
for c in components:
allgroups.extend(groups[c]['rv'])
allgroups = np.unique(allgroups)
# get all components for a given group
rvs = {c: [] for c in components}
names = []
hjds = []
groups = []
for i, g in enumerate(allgroups):
# get all observed spectra corresponding to the group
obspecs = self.ol.get_spectra(rv=g)
# get the radial velocities
pars = self.sl.get_parameter(rv=g)
for obspec in obspecs:
for j, c in enumerate(components):
# append radial velocity
if c in pars.keys():
rvs[c].append(pars[c][0]['value'])
                    # if a component is missing, -9999.9999 is assigned instead
else:
rvs[c].append(-9999.9999)
# append name and hjd and group
names.append(obspec.filename)
hjds.append(obspec.hjd)
groups.append(g)
if outputname is not None:
            # open the file
ofile = open(outputname, 'w')
# switch for writing hjds
has_hjd = any([x is not None for x in hjds])
# write the header
if has_hjd:
ofile.write("#%5s%20s%20s" % ('GROUP', 'FILENAME', 'HJD'))
else:
ofile.write("#%5s%20s" % ('GROUP', 'FILENAME'))
for j in range(0, len(components)):
ofile.write("%15s" % components[j].upper())
ofile.write('\n')
            # write the rvs
for i in range(0, len(names)):
                # what if HJD is not assigned
if has_hjd:
ofile.write("%6s%20s%20s" % (str(groups[i]).zfill(3), names[i], str(hjds[i])))
else:
ofile.write("%6s%20s" % (str(groups[i]).zfill(3), names[i]))
for c in components:
ofile.write("%15.6f" % rvs[c][i])
ofile.write('\n')
return rvs, allgroups, names
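    # Hedged usage sketch -- writes one row per rv group into 'rvs.dat'
    # (the file name is only an example):
    #
    #     rvs, groups, names = itf.write_rvs(outputname='rvs.dat')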
def write_shifted_spectra(self, outputfile=None, residuals=False):
"""
        Writes the synthetic (shifted) spectra, or the residuals, for each comparison.
        :param outputfile: prefix of the output file names
        :param residuals: if True, observed minus synthetic residuals are written instead
        :return:
"""
# setup name prefix
if outputfile is None:
outputfile = ''
# go over each record within comparisonList
for cp in self.comparisonList:
if residuals:
outputfile = cp['observed'].filename
# extract description of the comparison
wave = cp['wave']
intens = sum_dict_keys(cp['synthetic'])
wmin = cp['wmin']
wmax = cp['wmax']
component = cp['observed'].component
korel = cp['observed'].korel
rvgroup = cp['groups']['rv']
# set name
name = '_'.join([outputfile, 'c', component, 'wmin', str(wmin), 'wmax', str(wmax), 'g', str(rvgroup)]) \
+ '.dat'
# construct header of the file
header = ''
header += '# Component: %s\n' % str(component)
header += '# Region: (%s,%s)\n' % (str(wmin), str(wmax))
header += '# KOREL: %s\n' % str(korel)
header += '# Residual: %s\n' % str(residuals)
# write the synthetic spectrum
ofile = open(name, 'w')
ofile.writelines(header)
if residuals:
oi = cp['observed'].get_spectrum(wmin, wmax)[1]
np.savetxt(ofile, np.column_stack([wave, oi - intens]), fmt='%15.8e')
else:
np.savetxt(ofile, np.column_stack([wave, intens]), fmt='%15.8e')
ofile.close()
def write_synthetic_spectra(self, component=None, region=None, rvgroups=None, outputname=None, korel=False):
"""
Writes the synthetic spectra obtained through the fitting.
:param component
:param region
:param outputname
:param korel
:return:
"""
# set defaults for component
if component is None:
components = self.sl.get_components()
if isinstance(component, str):
components = [component]
# set defaults for region
if region is None:
regions = self.rl.get_registered_regions()
if isinstance(region, str):
regions = [region]
# go over each region
for r in regions:
# get the wavelengths
wmin = self.rl.mainList[r]['wmin']
wmax = self.rl.mainList[r]['wmax']
# get defined groups for the region
reg_groups = copy.deepcopy(self.rl.mainList[r]['groups'][0])
phys_pars = [x for x in self.sl.get_physical_parameters() if x not in ['rv']]
for par in phys_pars:
if par not in reg_groups.keys():
reg_groups[par] = 0
# get regional parameters
reg_pars = self.sl.get_parameter(**reg_groups)
for c in components:
# get defined rv groups
if rvgroups is None:
rv_groups = self.sl.get_defined_groups(component=c, parameter='rv')[c]['rv']
else:
                    if not isinstance(rvgroups, (list, tuple)):
                        rv_groups = [rvgroups]
                    else:
                        rv_groups = rvgroups
for rvg in rv_groups:
# the outputname
if outputname is not None:
oname = '_'.join([outputname, 'c', c, 'r', str(wmin),
str(wmax), 'g', str(rvg)]) + '.dat'
else:
oname = '_'.join(['c', c, 'r', str(wmin),
str(wmax), 'g', str(rvg)]) + '.dat'
if self.debug:
print "Writing spectrum: %s." % oname
# get the parameters
# the radial velocity
rvpar = self.sl.get_parameter(rv=rvg)[c]
# remaining parameters
cpars = reg_pars[c]
# append the radial velocity
cpars.extend(rvpar)
# print cpars
# separate those that need to be computed,
# i.e. those not defined by the grid
computepars = [par for par in cpars if par['name'] in self._not_given_by_grid]
computepars = self.extract_parameters(computepars)
# print computepars
# compute the synthetic spectra
w, i = self.synthetics[r][c].get_spectrum(wmin=wmin, wmax=wmax, korel=korel, **computepars)
                    # construct header of the file
header = ''
header += '# Component: %s\n' % str(c)
header += '# Region: (%s,%s)\n' % (str(wmin), str(wmax))
header += '# KOREL: %s\n' % str(korel)
header += '# Parameters: %s\n' % str(self.extract_parameters(cpars))
# write the file
ofile = open(oname, 'w')
ofile.writelines(header)
np.savetxt(ofile, np.column_stack([w, i]), fmt='%15.10e')
ofile.close()
            # destroy the rv groups if they were not passed by the user
if rvgroups is None:
rv_groups = None
class List(object):
"""
Future parent class for all the lists, which are dictionaries... :-)
"""
def __init__(self, l=None, debug=False):
"""
:param l: the list stored within the class
        :param debug: debug mode on/off
:return:
"""
# list
if l is not None:
self.mainList = l
else:
self.mainList = {}
# setup debug mode
self.debug = debug
def clear_all(self):
"""
Clears the list
:return: None
"""
self.mainList = {}
class ObservedList(object):
"""
A helper class which groups all observed spectra and
prepares necessary parameters for fitting.
"""
def __init__(self, observedSpectraList=None, debug=False):
"""
        :param observedSpectraList: this should not be used in general; it creates the class
                                    assuming that we are passing an already built
                                    self.observedSpectraList
:param debug: debug mode
:return:
"""
        # dictionary containing all observed spectra; apart from that
        # it also carries information on groups. A group for radial velocities
        # always has to be set, because we intend to fit spectra acquired
        # at different times.
self.observedSpectraList = dict(spectrum=[], group=dict(), properties=dict())
self.groupValues = dict()
# list of properties
self._property_list = ['component', 'filename', 'hasErrors', 'korel', 'loaded', 'wmin', 'wmax']
# self._queriables = copy.deepcopy(self._property_list).extend(['group'])
# although wmin, wmax can be queried, it is treated separately from the remaining
# parameters, because it cannot be tested on equality
self._queriables = [x for x in self._property_list if x not in ['wmin', 'wmax']]
self._queriable_floats = ['wmin', 'wmax']
# initialize with empty lists
self.observedSpectraList['properties'] = {key: [] for key in self._property_list}
# debug
self.debug = debug
if observedSpectraList is not None:
self.observedSpectraList = observedSpectraList
self.read_groups()
self.read_properties()
self.groupValues = self.get_defined_groups()
def __len__(self):
"""
Returns number of attached observed spectra.
"""
return len(self.observedSpectraList['spectrum'])
def __str__(self):
"""
String method for the class
:return:
            string.. string representation of the class
"""
string = 'List of all attached spectra:\n'
for i, spectrum in enumerate(self.observedSpectraList['spectrum']):
string += str(spectrum)
return string
def add_one_observation(self, obs=None, update=True, **kwargs):
"""
Adds observation to the list.
:param obs observed spectrum wrapped in ObservedSpectrum class
:param update - update the observed spectra list
:param kwargs
see class ObservedSpectrum (observations module) for details.
"""
# adds the spectrum and loads it
if self.debug:
kwargs['debug'] = True
if obs is None:
obs = ObservedSpectrum(**kwargs)
self.observedSpectraList['spectrum'].append(obs)
if self.debug:
print "Adding spectrum: %s" % (str(obs))
# builds the observedSpectraList dictionary
if update:
self.read_groups()
self.read_properties()
self.groupValues = self.get_defined_groups()
def add_observations(self, spec_list, update=True):
"""
:param spec_list: list of dictionaries - key words are
the same as for ObservedSpectrum class constructor
:param update: whether to update the dictionary
with the properties of the observed spectra
"""
        # attaches the spectra
for rec in spec_list:
if isinstance(rec, dict):
self.add_one_observation(update=False, **rec)
else:
self.add_one_observation(update=False, obs=rec)
# builds the observedSpectraList dictionary
if update:
self.read_groups()
self.read_properties()
self.groupValues = self.get_defined_groups()
def clear_all(self):
"""
Clears all spectra.
"""
self.__init__()
def get_data_groups(self, components):
"""
Returns a dictionary, containing a record
on defined group for each component.
:param components: a list of queried components
:return:
"""
groups = dict()
for component in components:
osl = self.get_spectra(verbose=True, component=component)
if self.debug:
print 'Queried observed spectra: %s for component: %s.' % (str(osl), component)
if len(osl) > 0:
groups[component] = ObservedList(observedSpectraList=osl).get_defined_groups()
return groups
def get_defined_groups(self, component=None):
"""
Reads all groups and values that are set
for the spectra in the list.
:param component
:return dictionary of defined group for all/given component
"""
        if component == 'all':
            component = None
        # empty dictionary for the values
        groups = dict()
        # go through each spectrum and store defined values
for spectrum in self.observedSpectraList['spectrum']:
# select component
if component is not None and spectrum.component != component:
continue
for key in spectrum.group.keys():
if key not in groups.keys():
groups[key] = []
if isinstance(spectrum.group[key], (list, tuple)):
groups[key].extend(spectrum.group[key])
else:
groups[key].append(spectrum.group[key])
# only unique values are needed
for key in groups.keys():
groups[key] = np.unique(groups[key]).tolist()
return groups
def get_resolution(self, verbose=False):
"""
        Reads resolution for each spectrum
:param verbose
:return:
"""
# create a list of resolutions
resolutions = np.zeros(len(self))
for i in range(0, len(self)):
resolutions[i] = self.observedSpectraList['spectrum'][i].step
# if verbose is set returns resolution for each spectrum
if verbose:
return resolutions
# or just the maximum value
else:
return np.max(resolutions)
def get_spectra(self, verbose=False, permissive=False, **kwargs):
"""
:param kwargs.. properties of ObservedSpectrum,
that we want to return. This function does not
search the individual spectra, but the dictionary
observedSpectraList.
        :param verbose: return the whole (reduced) observed spectra list
               instead of the plain list of spectra
:param permissive
In general this could be - wmin, wmax, group,
component etc..
:return:
speclist = all spectra that have the queried properties
"""
# First of all check that all passed arguments are
# either defined among queriables or is in groups
to_pass = []
for key in kwargs.keys():
# print key, self._queriables
if (key not in self._queriables) & (key not in self._queriable_floats):
if key not in self.groupValues.keys():
if permissive:
to_pass.append(key)
continue
raise KeyError('Keyword %s is not defined. This either means, that it was not set up for '
'the observed spectra, or is an attribute of Observed spectrum, but is not '
'defined among queriables, or is wrong.' % key)
# create a copy of the spectralist
osl = copy.deepcopy(self.observedSpectraList)
# debug string
dbg_string = 'Queried: '
# reduce the list
for key in kwargs.keys():
#
if key in to_pass:
continue
# find all matching for a given key-word
keytest = key.lower()
# these can be tested on equality as strings
if keytest in self._queriables:
vind = np.where(np.array(osl['properties'][keytest], dtype=str) == str(kwargs[key]))
elif keytest == 'component':
                vind = np.where((np.array(osl['properties'][keytest], dtype=str) == str(kwargs[key])) |
                                (np.array(osl['properties'][keytest], dtype=str) == 'all'))[0]
# that cannot be tested on equality
elif keytest == 'wmin':
vind = np.where(np.array(osl['properties'][keytest]) <= kwargs[key])[0]
elif keytest == 'wmax':
vind = np.where(np.array(osl['properties'][keytest]) >= kwargs[key])[0]
# those that are defined in groups
elif keytest in osl['group'].keys():
vind = []
for i in range(0, len(osl['spectrum'])):
if isinstance(osl['group'][keytest][i], (tuple, list)):
if kwargs[key] in osl['group'][keytest][i]:
vind.append(i)
else:
if kwargs[key] == osl['group'][keytest][i]:
vind.append(i)
vind = np.array(vind)
if len(vind) == 0:
warnings.warn('No spectrum matching %s: %s was found in the '
'list of observed spectra:\n%sDo not panic, it can '
'still be listed among \'all\'.' % (key, str(kwargs[key]), str(self)))
return []
if self.debug:
dbg_string += '%s: %s ' % (key, str(kwargs[key]))
print "%s.. %s spectra remain." % (dbg_string, str(len(vind)))
# extract them from the list
for dic in osl.keys():
# if the key refers to a dictionary
if isinstance(osl[dic], dict):
for sub_key in osl[dic].keys():
osl[dic][sub_key] = (np.array(osl[dic][sub_key])[vind]).tolist()
# if it refers to a list or array
else:
osl[dic] = (np.array(osl[dic])[vind]).tolist()
# simple output, just spectra
if not verbose:
return osl['spectrum']
# otherwise the whole remnant of the
# observed spectra list is returned
else:
return osl
def load(self, f):
"""
Loads the text representation of the class from
a file f.
:param f
:return:
"""
# read the file
lines = read_text_file(f)
data_start = len(lines)
for i, l in enumerate(lines):
if l.find('OBSERVEDLIST') > -1:
data_start = i
break
# check that there are actually some data in the file
# the algorithm failed to load the class
if data_start >= len(lines):
return False
# create a regionlist
ol = ObservedList()
# from here the file is actually being read
for i, l in enumerate(lines[data_start+1:]):
# once we reach regionlist, we end
if l.find('OBSERVEDLIST') > -1:
break
            # split the line
d = l.split()
# print d
if d[0].find('filename') > -1:
i = 0
cdict = {}
# print d
while i < len(d):
if d[i].find(':') > -1:
j = i + 1
while j < len(d) and d[j].find(':') == -1:
j += 1
stub = d[i:j]
if len(stub) < 3:
cdict[d[i].strip(':')] = stub[1].strip(':[]{}\'\"')
else:
cdict[d[i].strip(':')] = map(int, [stub[k].strip(':[]{}\'\"') for k in range(1, len(stub))])
i = j
# it is a mess with the global error :-(
cdict['error'] = cdict['global_error']
del cdict['global_error']
# cast the parameters to the correct types
parnames = ['filename', 'component', 'error', 'korel', 'hjd']
cast_types = [str, str, float, string2bool, float]
for k in cdict.keys():
if k in parnames:
i = parnames.index(k)
if cdict[k] != 'None':
cdict[k] = cast_types[i](cdict[k])
else:
cdict[k] = None
else:
# the remaining must be groups
cdict[k] = int(cdict[k])
# add the parameter if it does not exist
groups = {key: cdict[key] for key in cdict.keys() if key not in parnames}
kwargs = {key: cdict[key] for key in cdict.keys() if key in parnames}
ol.add_one_observation(group=groups, **kwargs)
# do the same for enviromental keys
if d[0].find('env_keys') > -1:
# the first string is just identification
d = d[1:]
                # secure correct types
recs = ['debug']
cast_types = [string2bool]
cdict = {d[i].rstrip(':'): d[i+1] for i in range(0, len(d), 2)}
for k in cdict.keys():
if k in recs:
i = recs.index(k)
ctype = cast_types[i]
cdict[k] = ctype(cdict[k])
                # assign the values
setattr(ol, k, cdict[k])
# finally assign everything to self
attrs = ['debug', 'groupValues', 'observedSpectraList']
for attr in attrs:
setattr(self, attr, getattr(ol, attr))
# if we got here, we loaded the data
return True
def read_groups(self):
"""
Updates the dictionary observedSpectraList with group
records for every single observations and creates
the dictionary groupValues which contains lists of
all defined groups for every parameter.
For parameters != 'rv':
If at least one spectrum has a group assigned
it is automatically assumed, that it does not
belong among the remaining ones. This means
that all remaining spectra are assigned their
own group.
For parameters == 'rv':
Each spectrum is assigned unique RV group,
unless this is overriden by the user by setting
them up. This comes natural, since we are
likely to fit spectra from different times,
regions, where slight shifts in rv are
very likely.
"""
# First go through each spectrum to see, which
# groups were defined by user
groups = self.get_defined_groups()
# print groups
# check that rv has been setup - mandatory, because each observed spectrum
# is assigned its own rv_group
if 'rv' not in groups.keys():
groups['rv'] = []
# assign empty group arrays
for key in groups.keys():
self.observedSpectraList['group'][key] = np.zeros(len(self)).astype('int16').tolist()
# Assigning groups to every spectrum
for i, spectrum in enumerate(self.observedSpectraList['spectrum']):
for key in groups.keys():
                # If not user-defined, the smallest group number
                # not present among the already defined groups is assigned
if key != 'rv':
gn = spectrum.get_group(key)
def_groups = groups[key]
# print key, gn, def_groups
                    # if the spectrum has no group, but some groups have been defined,
                    # it is assigned the smallest number not among the defined groups
if gn is None and len(def_groups) > 0:
gn = 0
while gn in def_groups:
gn += 1
# if no group is defined for all spectra, start with zero
elif gn is None and len(def_groups) == 0:
gn = 0
# store the groupnumber
self.observedSpectraList['group'][key][i] = gn
else:
gn = spectrum.get_group(key)
if gn is None:
self.observedSpectraList['group'][key][i] = None
else:
self.observedSpectraList['group'][key][i] = gn
# propagate the groups back to spectra
self._set_groups_to_spectra()
def read_properties(self):
"""
        Goes through the attached spectra, reads their properties
        and stores them within the observedSpectraList
        dictionary.
"""
# initialize with empty lists
for key in self._property_list:
self.observedSpectraList['properties'][key] = np.empty(len(self), dtype=object)
# fill the dictionary
for i, spectrum in enumerate(self.observedSpectraList['spectrum']):
for key in self._property_list:
self.observedSpectraList['properties'][key][i] = getattr(spectrum, key)
def save(self, ofile):
"""
Saves the class. It should be retrievable from the file.
:param ofile:
:return:
"""
# Open the file
if isinstance(ofile, str):
ofile = open(ofile, 'w+')
# parameters listed for each record in the RegionList
enviromental_keys = ['debug']
string = ' OBSERVEDLIST '.rjust(105, '#').ljust(200, '#') + '\n'
for s in self.observedSpectraList['spectrum']:
keys = ['filename', 'component', 'korel', 'global_error', 'groups', 'hjd']
for k in keys:
if k not in ['groups']:
string += '%s: %s ' % (k, str(getattr(s, k)))
else:
for gk in s.group.keys():
if isinstance(s.group[gk], (list, tuple)):
string += '%s: ' % gk
for gn in s.group[gk]:
string += '%s ' % str(gn)
else:
string += '%s: %s ' % (gk, str(s.group[gk]))
string += '\n'
# attach enviromental keys
for ekey in enviromental_keys:
string += "%s: %s " % (ekey, str(getattr(self, ekey)))
string += '\n'
# finalize the string
string += ' OBSERVEDLIST '.rjust(105, '#').ljust(200, '#') + '\n'
# write the result
ofile.writelines(string)
def set_spectrum(self, filename=None, **kwargs):
"""
Sets spectrum to a given value.
:param filename
:param kwargs:
:return:
"""
# print kwargs
for i in range(0, len(self)):
if self.observedSpectraList['spectrum'][i].filename == filename:
for key in kwargs.keys():
setattr(self.observedSpectraList['spectrum'][i], key, kwargs[key])
                    if key == 'group':
self.observedSpectraList['spectrum'][i].set_group(kwargs[key])
# print self
self.read_groups()
self.groupValues = self.get_defined_groups()
def _set_groups_to_spectra(self):
"""
Propagates groups, which are set in observedSpectraList,
in individual spectra.
"""
for i in range(0, len(self.observedSpectraList['spectrum'])):
group = {key: self.observedSpectraList['group'][key][i] for key in self.observedSpectraList['group'].keys()}
self.observedSpectraList['spectrum'][i].set_group(group)
class RegionList(List):
"""
"""
def __init__(self, **kwargs):
"""
Class constructor
:return:None
"""
# setup the parent class
super(RegionList, self).__init__(**kwargs)
# registered keywords
self._registered_records = ['components', 'groups', 'wmin', 'wmax']
# if not given along the class a blank one is created
if len(self.mainList.keys()) < 1:
self.mainList = {}
self._registered_regions = []
self._user_defined_groups = {}
else:
self._registered_regions = self.get_registered_regions()
def __str__(self):
"""
String representation of the class.
:return: string
"""
string = ''
# go over regions
for key0 in self.mainList.keys():
# region properties
string += "Region name: %s: (wmin, wmax) = (%s, %s):\n" % (key0, str(self.mainList[key0]['wmin']),
str(self.mainList[key0]['wmax']))
            # component properties
for i in range(0, len(self.mainList[key0]['components'])):
string += "%s: %s " % ('component', str(self.mainList[key0]['components'][i]))
string += "%s: %s " % ('groups', str(self.mainList[key0]['groups'][i]))
string += '\n'
return string
def add_region(self, component='all', identification=None, wmin=None, wmax=None, groups=None):
"""
        :param component: component for which the region applies
:param identification
:param wmin: minimal wavelength
:param wmax: maximal wavelength
:param groups: group numbers for this region
:return: None
"""
# if we are crazy and want to set this up
# either by wavelength or by identification
if (wmin is None or wmax is None) and identification is None:
raise ValueError('Boundaries are not set properly: (wmin,wmax)= (%s, %s)' % (str(wmin), str(wmax)))
else:
if (wmin >= wmax) and identification not in self._registered_regions:
raise ValueError('wmin is greater than wmax: %s > %s '
'or the region: %s is not registered.' % (str(wmin), str(wmax), identification))
# convert component/group/identification keys to lowercase
if groups is not None:
groups = keys_to_lowercase(groups)
else:
groups = {}
# make component case insensitive
component = component.lower()
ident = identification
if ident is not None:
ident = ident.lower()
# maybe the region has been already defined
if ident in self.mainList.keys():
region = ident
elif ident is None:
region = self.get_region(wmin, wmax)
else:
region = None
        # if the region already exists and the component is 'all',
        # there is no point in attaching it
# print region, component
if (region is not None) and (component == 'all'):
warnings.warn('The region already exists as region: %s -> doing nothing.' % region)
return
# if it is not empty
if region is not None:
if self.debug:
print "Adding component: %s to region: %s" % (component, region)
            # check that the component was not set earlier
if self.has_component(region, component):
warnings.warn('The component: %s is already set for region: %s. -> doing nothing.'
% (component, region))
return
# get lr from the region first record
# print groups, self.mainList[region]['groups']
groups['lr'] = self.mainList[region]['groups'][0]['lr']
self.read_user_defined_groups(groups)
# store everything apart from the wmin, wmax
self.mainList[region]['groups'].append(groups)
self.mainList[region]['components'].append(component)
# readout user-defined groups
self.read_user_defined_groups(groups)
else:
            # set up identification for the new region
if ident is None:
ident = 'region' + str(len(self._registered_regions)).zfill(2)
if self.debug:
print "Creating new region: %s." % ident
# register the new region
self.mainList[ident] = dict(wmin=wmin, wmax=wmax, components=[component], groups=[])
self._registered_regions.append(ident)
# if the luminosity group is not defined
if 'lr' not in groups.keys():
all_groups = self.get_defined_groups()
if 'lr' in all_groups.keys():
def_groups = all_groups['lr']
else:
def_groups = []
gn = 0
while gn in def_groups:
gn += 1
groups['lr'] = gn
# add groups to the list
self.mainList[ident]['groups'].append(groups)
# readout user-defined groups
self.read_user_defined_groups(groups)
self.setup_undefined_groups()
def clear_all(self):
"""
Clears the class.
:return:
"""
super(RegionList, self).clear_all()
self._registered_regions = []
self._user_defined_groups = {}
def get_defined_groups(self):
"""
Returns plain list of all defined groups regardless of their components.
:return: list of defined groups
"""
groups = {}
for reg in self._registered_regions:
for rec in self.mainList[reg]['groups']:
for key in rec.keys():
if key not in groups.keys():
groups[key] = [rec[key]]
else:
if rec[key] not in groups[key]:
groups[key].append(rec[key])
return groups
def get_region(self, wmin, wmax):
"""
        Checks whether a region with this wavelength range
        has already been registered and returns it; otherwise returns None.
:param wmin
:param wmax
:return:
"""
for region in self.mainList:
if (abs(self.mainList[region]['wmin'] - wmin) < ZERO_TOLERANCE) & \
(abs(self.mainList[region]['wmax'] - wmax) < ZERO_TOLERANCE):
return region
return None
def get_region_groups(self):
"""
A dictionary of groups defined for regions component by component.
:return: dictionary containing records on groups
which can be directly passed to type StarList
through set_groups
"""
groups = {}
# go over each region
for reg in self.mainList.keys():
for i in range(0, len(self.mainList[reg]['components'])):
component = self.mainList[reg]['components'][i]
comp_groups = self.mainList[reg]['groups'][i]
# setup component
if component not in groups.keys():
groups[component] = {}
# setup keys
for key in comp_groups.keys():
if key not in groups[component].keys():
groups[component][key] = [comp_groups[key]]
else:
if comp_groups[key] not in groups[component][key]:
groups[component][key].append(comp_groups[key])
return groups
def get_registered_regions(self):
"""
Returns an array of registered regions.
:return:
"""
return self.mainList.keys()
def get_wavelengths(self, verbose=False):
"""
Returns registered wavelengths
:param verbose
:return: wmins, wmaxs = arrays of minimal/maximal wavelength for each region
"""
wmins = []
wmaxs = []
regs = []
for reg in self.mainList.keys():
wmins.append(self.mainList[reg]['wmin'])
wmaxs.append(self.mainList[reg]['wmax'])
regs.append(reg)
if verbose:
return wmins, wmaxs, regs
else:
return wmins, wmaxs
def get_regions_from_obs(self, ol, append=False):
"""
Reads the region from a list of observations. In general this
function should not be used for fitting, because it
makes no sense to fit the whole spectrum.
:param ol: list of ObservedSpectrum
:param append are we appending to existing list?
:return: list of unique limits
"""
if len(ol) == 0:
raise ValueError('Cannot setup regions from observed spectra, because'
' their list is empty!')
# clear the regions if needed
if not append:
self.clear_all()
# empty arrays for limits
limits = {}
        # the rounding is there to get over problems with float precision
for obs in ol:
component = obs.component
if component not in limits:
limits[component] = [[], []]
limits[component][0].append(np.ceil(obs.wmin))
limits[component][1].append(np.floor(obs.wmax))
# get only unique values
for i in range(0, 2):
limits[component][i] = np.unique(limits[component][i])
# check that something funny did not happen
for component in limits.keys():
if len(limits[component][0]) != len(limits[component][1]):
raise ValueError('The limits were not read out correctly from observed spectra.')
# setup the regions
for i in range(0, len(limits[component][0])):
self.add_region(component=component,
wmin=limits[component][0][i],
wmax=limits[component][1][i])
return limits
def has_component(self, region, component):
"""
Checks that certain component was attached for a given
region.
:param region:
:param component:
:return: bool has/has_not the component
"""
for regcomp in self.mainList[region]['components']:
if (regcomp == component) or (regcomp == 'all'):
return True
return False
def load(self, f):
"""
Loads the text representation of the class from
a file f.
:param f
:return:
"""
# read the file
lines = read_text_file(f)
data_start = len(lines)
for i, l in enumerate(lines):
if l.find('REGIONLIST') > -1:
data_start = i
break
# check that there are actually some data in the file
# if not we failed
if data_start >= len(lines):
return False
# create a regionlist
rl = RegionList()
# from here the file is actually being read
for i, l in enumerate(lines[data_start+1:]):
# once we reach regionlist, we end
if l.find('REGIONLIST') > -1:
break
            # split the line
d = l.split()
# print d
if d[0].find('identification') > -1:
cdict = {d[i].rstrip(':'): d[i+1] for i in range(0, len(d), 2)}
# print cdict
                # cast the parameters to the correct types
parnames = ['wmin', 'wmax', 'identification', 'component']
cast_types = [float, float, str, str]
for k in cdict.keys():
if k in parnames:
i = parnames.index(k)
cdict[k] = cast_types[i](cdict[k])
else:
# the remaining must be groups
cdict[k] = int(cdict[k])
# add the parameter if it does not exist
groups = {key: cdict[key] for key in cdict.keys() if key not in parnames}
kwargs = {key: cdict[key] for key in cdict.keys() if key in parnames}
# print groups
# # print kwargs
rl.add_region(groups=groups, **kwargs)
# do the same for enviromental keys
if d[0].find('env_keys') > -1:
# the first string is just identification
d = d[1:]
                # secure correct types
recs = ['debug']
cast_types = [string2bool]
cdict = {d[i].rstrip(':'): d[i+1] for i in range(0, len(d), 2)}
for k in cdict.keys():
if k in recs:
i = recs.index(k)
ctype = cast_types[i]
cdict[k] = ctype(cdict[k])
                # assign the values
setattr(rl, k, cdict[k])
# finally assign everything to self
attrs = ['_registered_records', '_registered_regions', '_user_defined_groups',
'mainList', 'debug']
for attr in attrs:
setattr(self, attr, getattr(rl, attr))
# if we got here, we loaded the data
return True
def read_user_defined_groups(self, groups):
"""
When adding new region, all user defined groups
are read out to properly set the default groups
:param groups groups to be read
:return: None
"""
for key in groups.keys():
if key not in self._user_defined_groups.keys():
self._user_defined_groups[key] = [groups[key]]
else:
if groups[key] not in self._user_defined_groups[key]:
self._user_defined_groups[key].append(groups[key])
def save(self, ofile):
"""
Saves the class. It should be retrievable from the file.
:param ofile:
:return:
"""
# Open the file
if isinstance(ofile, str):
ofile = open(ofile, 'w+')
# parameters listed for each record in the RegionList
enviromental_keys = ['debug']
string = ' REGIONLIST '.rjust(105, '#').ljust(200, '#') + '\n'
for ident in self.mainList.keys():
for i, c in enumerate(self.mainList[ident]['components']):
string += 'identification: %s ' % ident
# write the wavelengths
for lkey in ['wmin', 'wmax']:
string += '%s: %s ' % (lkey, str(self.mainList[ident][lkey]))
# write components
string += "component: %s " % c
# and groups
for gkey in self.mainList[ident]['groups'][i].keys():
string += "%s: %s " % (gkey, str(self.mainList[ident]['groups'][i][gkey]))
string += '\n'
# setup additional parameters
string += 'env_keys: '
for ekey in enviromental_keys:
string += '%s: %s ' % (ekey, str(getattr(self, ekey)))
string += '\n'
string += ' REGIONLIST '.rjust(105, '#').ljust(200, '#') + '\n'
# write the remaining parameters
ofile.writelines(string)
def setup_undefined_groups(self):
"""
        The user can be a bit lazy. If we split some parameter
        into more groups, we may set the group only for a few
        regions and the remaining ones get a default one.
This nonetheless has to be run after all
regions were attached. If we do this
earlier, we will get into serious problems.
:return:
"""
# defined groups
groups = self.get_defined_groups()
# setup default group numbers for region->component
# with unset group
for region in self._registered_regions:
for i, comp_group in enumerate(self.mainList[region]['groups']):
# go over each defined group
for key in groups.keys():
# if the key is unset for the component
# we have to assign some. This must
# not be one of the user-defined.
# That is why we maintain dictionary
# of user defined groups.
if key not in comp_group.keys():
gn = 0
while gn in self._user_defined_groups[key]:
gn += 1
self.mainList[region]['groups'][i][key] = gn
class StarList(object):
"""
"""
def __init__(self, debug=False):
"""
"""
# set up debug mode
self.debug = debug
# define empty list of components
self.componentList = {}
# array storing registered components
self._registered_components = []
# defined groups
self.groups = {}
# empty dictionary for the list of
# fitted types
self.fitted_types = {}
def __len__(self):
"""
Returns number of parameters.
:return: l
"""
        l = 0
        for c in self._registered_components:
            for key in self.componentList[c].keys():
                l += len(self.componentList[c][key])
        return l
def __str__(self):
"""
        :return: string = string representation of the class
"""
string = ''
for component in self.componentList.keys():
string += "Component: %s\n" % component
for parkey in self.componentList[component].keys():
for par in self.componentList[component][parkey]:
string += str(par)
return string
def add_component(self, component=None, groups={}, use_defaults=True, **kwargs):
"""
Setups a component - if no kwargs are given,
all parameters from the parameter_definitions
are taken.
If one wants to not-include a parameter,
params = None, has to be passed. If one
wants to add a parameter, that is not
defined in parameter definitions, just
pass parameter + value.
:param component: Registration string of the component
if None is given, it is registred as 'componentXX'
:param groups: group set to all parameters of a component
:param use_defaults
:param kwargs:
:return:
"""
# setup name of the component and create a record within
# component list
if component is None:
component = 'component' + str(len(self._registered_components))
        # register the component
self._registered_components.append(component)
# the parameters will be stored in a dictionary
self.componentList[component] = dict()
pd = copy.deepcopy(parameter_definitions)
# setup groups for default parameters
for key in groups.keys():
if key in pd.keys():
pd[key]['group'] = groups[key]
# process the keyword-arguments
for key in kwargs.keys():
keytest = key.lower()
# if we pass par + value, it is just stored
if keytest in pd.keys() and kwargs[key] is not None:
self.componentList[component][keytest] = []
self.componentList[component][keytest].append(Parameter(**pd[key]))
self.componentList[component][keytest][-1]['value'] = kwargs[key]
elif kwargs[key] is None:
warnings.warn('The parameter %s is set to %s. Therefore it is not '
'included into component parameters.' % (key, str(kwargs[key])))
elif keytest not in pd.keys() and kwargs[key] is not None:
# set up group
if keytest in groups.keys():
group = groups[keytest]
self.componentList[component][keytest] = []
self.componentList[component][keytest].append(Parameter(name=key, value=kwargs[key], group=group))
self.componentList[component][keytest][-1].set_empty()
warnings.warn('The parameter %s: %s is not set among the '
'parameter definitions. Therefore you should pay '
                              'attention to its settings.' % (key, str(kwargs[key])))
# pass all unset parameters in definitions
if use_defaults:
for key in pd.keys():
if key not in self.componentList[component].keys():
self.componentList[component][key] = []
self.componentList[component][key].append(Parameter(**pd[key]))
# readout the groups
self.read_groups()
self.get_fitted_types()
def add_parameter_to_component(self, component, p=None, **kwargs):
"""
Adds a parameter to a specific component.
:param component: component for which we want to add a parameter
:param p: assigning directly the Parameter type
:param kwargs: see Parameter class for description
:return:
"""
if p is None:
self.componentList[component][kwargs['name']] = []
self.componentList[component][kwargs['name']].append(Parameter(**kwargs))
else:
# print p['name']
self.componentList[component][p['name']].append(copy.deepcopy(p))
# redefine groups
self.read_groups()
self.get_fitted_types()
def add_parameter_to_all(self, **kwargs):
"""
Adds a parameter to all components
:param kwargs: see Parameter class
:return: None
"""
for component in self._registered_components:
self.add_parameter_to_component(component, **kwargs)
def clear(self):
"""
Clears the component list
:return: None
"""
self.componentList = {}
self._registered_components = []
def clone_parameter(self, component, parameter, index=0, **kwargs):
"""
Clones a parameter and stores it for a given component.
This function will be primarily used to clone parameters
        to account for different groups.
:param component: component for which we want to clone the parameter
:param parameter: the cloned parameter
:param index : the specific cloned parameter
:param kwargs: values we want to change for the parameter
:return: clone type_Parameter - the cloned parameter
"""
        # in case we pass 'all', the parameter is cloned for every registered component
if component.lower() == 'all':
components = self._registered_components
else:
components = [component]
clones = []
# go over each component
for component in components:
# copy the parameter
clone = copy.deepcopy(self.componentList[component][parameter][index])
clones.append(clone)
# adjust its values
for key in kwargs.keys():
keytest = key.lower()
clone[keytest] = kwargs[key]
# append the new component to the componentlist
self.add_parameter_to_component(component, p=clone)
return clones
def copy(self):
"""
Creates a deepcopy of the class StarList.
:return:
"""
other = StarList()
for attr in ['_registered_components', 'componentList', 'debug',
'fitted_types', 'groups']:
v = getattr(self, attr)
setattr(other, attr, copy.deepcopy(v))
return other
def delete_hollow_groups(self):
"""
Goes through parameters and deletes those that
are set to None.
:return: None
"""
for component in self._registered_components:
for parkey in self.componentList[component].keys():
i = 0
while i < len(self.componentList[component][parkey]):
                    # if the parameter group is None, it is deleted
if self.componentList[component][parkey][i]['group'] is None:
del self.componentList[component][parkey][i]
else:
i += 1
def delete_duplicities(self):
"""
Delete duplicities in groups.
:return: None
"""
for component in self._registered_components:
            # groups can of course be the same for two components,
def_groups = []
for parkey in self.componentList[component].keys():
i = 0
while i < len(self.componentList[component][parkey]):
if self.componentList[component][parkey][i]['group'] not in def_groups:
def_groups.append(self.componentList[component][parkey][i]['group'])
i += 1
# if the parameter with the group has been already defined, delete it
else:
del self.componentList[component][parkey][i]
def get_common_groups(self):
"""
Returns a dictionary of groups shared by all components.
:return: com_groups
"""
# get the keys of physical parameters
parkeys = self.get_physical_parameters()
# get the groups
com_groups = {}
for key in parkeys:
com_groups[key] = []
            # define the reference component
comp0 = self._registered_components[0]
# groups are always common for one parameter
if len(self._registered_components) < 2:
is_common = True
            # go over each group of the reference component
for i in range(0, len(self.componentList[comp0][key])):
refpar = self.componentList[comp0][key][i]
# print refpar
# at the beginning
for component in self._registered_components[1:]:
is_common = False
for j, par in enumerate(self.componentList[component][key]):
# print par
if refpar['group'] == par['group']:
is_common = True
break
if not is_common:
break
if is_common:
com_groups[key].append(refpar['group'])
return com_groups
def get_components(self):
"""
Returns list of all defined components.
:return:
"""
return copy.deepcopy(self._registered_components)
def get_defined_groups(self, component=None, parameter=None):
""":
:param component: starlist component
:param parameter: physical parameter
:return: dictionary of groups
"""
groups = {}
# setup parameters
if parameter is None:
parameters = self.get_physical_parameters()
else:
parameters = [parameter]
# setup components
if component is None or component == 'all':
components = self.get_components()
else:
components = [component]
        # go over the registered components
for comp in components:
groups[comp]= {}
# go over passed parameters
for param in parameters:
groups[comp][param] = []
for regparam in self.componentList[comp][param]:
if regparam.name == param:
groups[comp][param].append(regparam.group)
# merge groups if component was 'all'
        if component == 'all':
            groups[component] = {}
            for p in parameters:
                temp = []
                for c in components:
                    # print flatten_2d(groups[c][p])
                    temp.extend(groups[c][p])
                groups[component][p] = np.unique(temp).tolist()
return groups
def get_fitted_parameters(self, verbose=False):
"""
Returns a list of fitted parameters wrapped within the Parameter class ofc.
:param verbose - return a dictionary with additional info on the
fitted parameters.
:return:
"""
fit_pars = []
# info on the fitted parameters
# is stored in a list and passed if
# necessary
if verbose:
fit_pars_info = {'component': [], 'group': [], 'name': [], 'value': []}
# go over all parameters and components
for c in self._registered_components:
for parname in self.get_physical_parameters():
for par in self.componentList[c][parname]:
if par['fitted']:
fit_pars.append(par)
if verbose:
for k in fit_pars_info.keys():
if k != 'component':
fit_pars_info[k].append(par[k])
else:
fit_pars_info[k].append(c)
if not verbose:
return fit_pars
else:
return fit_pars, fit_pars_info
def get_fitted_types(self):
"""
Stores a dictionary of fitted types for
each component in the class. This should
be updated whenever a parameter is changed.
:return:
"""
fitted_types = {}
# go over each component
for c in self.componentList.keys():
fitted_types[c] = []
# go over each parameter type
for parname in self.componentList[c]:
                # and finally over each parameter
for par in self.componentList[c][parname]:
if parname not in fitted_types[c]:
if par['fitted']:
fitted_types[c].append(parname)
else:
break
# print fitted_types
self.fitted_types = fitted_types
def get_index(self, component, parameter, group):
"""
Returns index of a component/parameter/group.
:param component:
:param parameter:
:param group:
:return:
"""
for i, par in enumerate(self.componentList[component][parameter]):
if par['group'] == group:
return i
warnings.warn('Component: %s Parameter: %s Group: %s'
' not found.' % (component, parameter, str(group)))
return None
def get_parameter_types(self):
"""
Returns a list of all parameter names
:return:
"""
partypes = []
# go over each component and parameter
for c in self._registered_components:
for p in self.componentList[c].keys():
if p not in partypes:
partypes.append(p)
return partypes
def get_parameter(self, **kwargs):
"""
Returns all parameters, which have certain group.
:param kwargs:
:return:
"""
pars = {x: [] for x in self._registered_components}
for key in kwargs.keys():
for c in self._registered_components:
for i, par in enumerate(self.componentList[c][key]):
# print i, par
if par.group == kwargs[key]:
pars[c].append(self.componentList[c][key][i])
return pars
def get_physical_parameters(self):
"""
Reads physical parameters from the starlist.
:return:
"""
pars = []
for c in self._registered_components:
pars.extend(self.componentList[c].keys())
return np.unique(pars)
def list_parameters(self):
"""
        Returns a list of all parameters.
:return:
"""
# empty output structure
return copy.deepcopy(self.componentList)
def load(self, f):
"""
Loads the text representation of the class from
a file f.
:param f
:return:
"""
# read the file
lines = read_text_file(f)
data_start = len(lines)
for i, l in enumerate(lines):
if l.find('STARLIST') > -1:
data_start = i
break
# check that there are actually some data in the file
if data_start >= len(lines):
return False
# create a StarList
sl = StarList()
# from here the file is actually being read
for i, l in enumerate(lines[data_start+1:]):
# once we reach starlist again, we end
if l.find('STARLIST') > -1:
break
d = l.split()
if d[0].find('component') > -1:
cdict = {d[i].rstrip(':'): d[i+1] for i in range(0, len(d), 2)}
                # cast the parameters to the correct types
for k in cdict.keys():
if k in ['value', 'vmin', 'vmax']:
cdict[k] = float(cdict[k])
elif k in ['group']:
cdict[k] = int(cdict[k])
elif k in ['fitted']:
cdict[k] = string2bool(cdict[k])
# add the parameter if it does not exist
c = cdict['component']
p = cdict['parameter']
if c not in sl.componentList.keys():
sl.componentList[c] = {}
sl._registered_components.append(c)
if cdict['parameter'] not in sl.componentList[c].keys():
sl.componentList[c][p] = []
                # transform the array to the Parameter class
pdict = {key: cdict[key] for key in cdict.keys() if key not in ['parameter', 'component']}
pdict['name'] = p
                # add the parameter to the class
par = Parameter(**pdict)
sl.add_parameter_to_component(component=c, p=par)
# do the same for enviromental keys
if d[0].find('env_keys') > -1:
# the first string is just identification
d = d[1:]
                # secure correct types
recs = ['debug']
cast_types = [string2bool]
cdict = {d[i].rstrip(':'): d[i+1] for i in range(0, len(d), 2)}
for k in cdict.keys():
if k in recs:
i = recs.index(k)
ctype = cast_types[i]
cdict[k] = ctype(cdict[k])
                # assign the values
setattr(sl, k, cdict[k])
# finally assign everything to self
attrs = ['_registered_components', 'componentList', 'debug',
'fitted_types', 'groups']
for attr in attrs:
setattr(self, attr, getattr(sl, attr))
# if we got here, we loaded the data
return True
def read_groups(self):
"""
Reads all groups from the defined components. This
is then compared to the list obtained from observations
and defined regions,
:return:
"""
for component in self.componentList.keys():
self.groups[component] = dict()
for key in self.componentList[component].keys():
self.groups[component][key] = []
for par in self.componentList[component][key]:
self.groups[component][key].append(par['group'])
def remove_parameter(self, component, parameter, group):
"""
:param component: component for which the parameter is deleted
        :param parameter: deleted parameter
:param group
:return:
"""
index = self.get_index(component, parameter, group)
del self.componentList[component][parameter][index]
def reset(self, parameters='all'):
"""
Leaves only one parameter per type and component.
        :param parameters: list of the reset parameters
:return:
"""
# cycle over components
for c in self._registered_components:
# select all parameters
if parameters == 'all':
reset_params = self.componentList[c].keys()
else:
reset_params = parameters
            # cycle over the reset parameters
for p in reset_params:
self.componentList[c][p] = [self.componentList[c][p][0]]
self.groups[c][p] = [self.groups[c][p][0]]
def save(self, ofile):
"""
Saves the class. It should be retrievable from the file.
:param ofile:
:return:
"""
# Open the file
if isinstance(ofile, str):
ofile = open(ofile, 'w+')
# parameters listed for each record in the starlist
listed_keys = ['value', 'unit', 'fitted', 'vmin', 'vmax', 'group']
string = ' STARLIST '.rjust(105, '#').ljust(200, '#') + '\n'
for c in self.componentList.keys():
for key in self.componentList[c].keys():
for par in self.componentList[c][key]:
string += 'component: %s ' % c
string += 'parameter: %s ' % key
for lkey in listed_keys:
string += '%s: %s ' % (lkey, str(par[lkey]))
string += '\n'
# setup additional parameters
enviromental_keys = ['debug']
string += 'env_keys: '
for ekey in enviromental_keys:
string += '%s: %s ' % (ekey, str(getattr(self, ekey)))
string += '\n'
string += ' STARLIST '.rjust(105, '#').ljust(200, '#') + '\n'
# write the remaining parameters
ofile.writelines(string)
def set_groups(self, groups, overwrite=False):
"""
Sets up groups - this function is designed to
use output from ObservedList.get_groups().
It is assumed that the structure is following:
dict(component01=dict(par1=[], par2=[]), component2=..)
        This function should primarily be used
        to assign rv_groups, where cloning
        is necessary to keep the bookkeeping manageable.
This function merges groups defined
in the type and the one passed. In general
we should not be able to do this.
:param overwrite
:param groups
:return: None
"""
for component in groups.keys():
for parkey in groups[component].keys():
# bool variable for case, when we want to completely overwrite
# previous settings
first_in_list = True
for group in groups[component][parkey]:
# setting group for all components
if component.lower() == 'all':
for one_comp in self._registered_components:
# print one_comp, parkey, self.groups
if group not in self.groups[one_comp][parkey]:
warnings.warn("Group %s: %s previously undefined."
"Adding to the remaining groups." % (parkey, str(group)))
# print one_comp, parkey, group
self.clone_parameter(one_comp, parkey, group=group)
# deletes all previous groups
if overwrite and first_in_list:
while len(self.groups[one_comp][parkey]) > 1:
del self.groups[one_comp][parkey][0]
first_in_list = False
# if we are setting group only for one component
else:
if group not in self.groups[component][parkey]:
warnings.warn("Group %s: %s previously undefined."
"Adding to the remaining groups." % (parkey, str(group)))
self.clone_parameter(component, parkey, group=group)
# deletes all previous groups
if overwrite and first_in_list:
                            while len(self.groups[component][parkey]) > 1:
                                del self.groups[component][parkey][0]
first_in_list = False
def set_parameter(self, name, component, group, **kwargs):
"""
Sets values defined in kwargs for a parameter
of a given component and group.
:param name:
:param component:
:param group:
:param kwargs
:return:
"""
# print name, component, group, kwargs
name = name.lower()
if name not in self.get_physical_parameters():
raise Exception("Parameter: %s unknown." % name)
elif component not in self._registered_components:
# print self._registered_components, component
raise Exception("Component: %s unknown" % component)
elif group not in self.get_defined_groups(component, name)[component][name]:
raise Exception("Group \"%i\" was not defined for component \"%s\" and parameter \"%s\"!" %
(group, component, name))
else:
for i, par in enumerate(self.componentList[component][name]):
if par['name'] == name and par['group'] == group:
for key in kwargs.keys():
keytest = key.lower()
# print name, component, keytest, kwargs[key]
self.componentList[component][name][i][keytest] = kwargs[key]
# print self
# update the list of fitted types
self.get_fitted_types()
class SyntheticList(List):
"""
List of resulting synthetic spectra.
"""
def __init__(self, **kwargs):
# initialize the parent
super(SyntheticList, self).__init__(**kwargs)
| 151,519 | 35.127802 | 120 | py |
pyterpol | pyterpol-master/synthetic/auxiliary.py | import numpy as np
import matplotlib.pyplot as plt
from astropy.constants import c
from scipy.interpolate import splrep
from scipy.interpolate import splev
from scipy.interpolate import bisplrep
from scipy.interpolate import bisplev
from scipy.interpolate import RectBivariateSpline
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.interpolate import spline
from scipy.signal import fftconvolve
ZERO_TOLERANCE = 1e-6
def flatten_2d(arr):
"""
Flattens 2-dim array
:param arr: 2d array
:return:
"""
newarr = []
if any([isinstance(subarr, (list, tuple)) for subarr in arr]):
for subarr in arr:
if isinstance(subarr, (tuple, list)):
newarr.extend(subarr)
else:
newarr.append(subarr)
return newarr
else:
return arr
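# A small illustration of flatten_2d (made-up input):
# flatten_2d([[1, 2], 3, (4, 5)]) -> [1, 2, 3, 4, 5], while an already flat
# list is returned unchanged.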
def instrumental_broadening(wave, flux, width=0.25, width_type='fwhm', interpolate_back=True):
"""
A convolution of a spectrum with a normal distribution.
:param: wave:
:param: flux:
:param width:
:param width_type:
:return:
"""
# print "Computing instr. broadening."
# If there is no broadening to apply, don't bother
if width < ZERO_TOLERANCE:
return flux
    # Convert user input width type to sigma (standard deviation)
width_type = width_type.lower()
if width_type == 'fwhm':
sigma = width / 2.3548
elif width_type == 'sigma':
sigma = width
else:
raise ValueError(("Unrecognised width_type='{}' (must be one of 'fwhm'"
"or 'sigma')").format(width_type))
# Make sure the wavelength range is equidistant before applying the
# convolution
delta_wave = np.diff(wave).min()
range_wave = wave.ptp()
n_wave = int(range_wave / delta_wave) + 1
wave_ = np.linspace(wave[0], wave[-1], n_wave)
# flux_ = np.interp(wave_, wave, flux)
flux_ = interpolate_spec(wave, flux, wave_)
dwave = wave_[1] - wave_[0]
n_kernel = int(2 * 4 * sigma / dwave)
    # The kernel might be of too low resolution, or the wavelength range
# might be too narrow. In both cases, raise an appropriate error
if n_kernel == 0:
raise ValueError(("Spectrum resolution too low for "
"instrumental broadening (delta_wave={}, "
"width={}").format(delta_wave, width))
elif n_kernel > n_wave:
raise ValueError(("Spectrum range too narrow for "
"instrumental broadening"))
# Construct the broadening kernel
wave_k = np.arange(n_kernel) * dwave
wave_k -= wave_k[-1] / 2.
kernel = np.exp(- (wave_k) ** 2 / (2 * sigma ** 2))
kernel /= sum(kernel)
# Convolve the flux with the kernel
flux_conv = fftconvolve(1 - flux_, kernel, mode='same')
# And interpolate the results back on to the original wavelength array,
# taking care of even vs. odd-length kernels
if n_kernel % 2 == 1:
offset = 0.0
else:
offset = dwave / 2.0
if interpolate_back:
flux = np.interp(wave + offset, wave_, 1 - flux_conv, left=1, right=1)
# flux = interpolate_spec(wave_, 1-flux_conv, wave+offset)
# Return the results.
return flux
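# A minimal usage sketch of instrumental_broadening (the line profile below is
# made up, not taken from any grid): degrade a synthetic profile with a Gaussian
# instrumental profile of 0.25 A FWHM.
# wave = np.linspace(6550., 6575., 2501)
# flux = 1. - 0.6 * np.exp(-0.5 * ((wave - 6562.8) / 0.4) ** 2)
# flux_lowres = instrumental_broadening(wave, flux, width=0.25, width_type='fwhm')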
def interpolate_block(x, block, xnew):
"""
Interpolates in each line of a 2d array.
:param x: independent variable
:type x: numpy.float64
:param block: 2d array for each column f(x)= block[i]
:type block: numpy.float64
:param xnew: point at which it is interpolated
:type xnew: float
:return:
"""
intens = np.zeros(len(block[0]))
n = len(block[:, 0])
# set up the order of interpolation
if n > 4:
k = 3
else:
k = n - 1
# k=3
    # TODO Can this be done faster with bisplrep and bisplev
# do the interpolation
for i in range(0, len(block[0])):
y = block[:, i]
tck = splrep(x, y, k=k)
intens[i] = splev(xnew, tck, der=0)
return intens
def interpolate_block_faster(x, block, xnew):
"""
    Interpolation of the spectra... hopefully faster?
:param x:
:param block:
:param xnew:
:return:
"""
# length of the datablock
nx = len(block[0])
ny = len(x)
# print x
if (ny > 3) & (ny < 6):
ky = 3
elif ny > 5:
ky = 5
else:
ky = ny - 1
# print ky
f = RectBivariateSpline(x, np.arange(nx), block, kx=ky, ky=1)
intens = f(xnew, np.arange(nx))[0]
return intens
def interpolate_spec(wave0, intens0, wave1):
"""
Defines a function intens0 = f(wave0) and
than interpolates in it at wave1.
:param wave0: initial wavelength array
:type wave0: numpy.float64
:param intens0: initial intensity array
:type intens0: numpy.float64
:param wave1: wavelength array at which we interpolate
:type wave1: numpy.float64
:return intens1: final intensity array
:rtype intens1: numpy.float64
"""
tck = splrep(wave0, intens0, k=3)
intens1 = splev(wave1, tck)
return intens1
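# An illustrative call of interpolate_spec (made-up sampling): resample a
# spectrum onto a coarser, equidistant grid.
# wave1 = np.arange(wave0[0], wave0[-1], 0.05)
# intens1 = interpolate_spec(wave0, intens0, wave1)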
def is_within_interval(v, arr):
"""
Tests whether value v lies within interval [min(arr); max(arr)]
    :param v: tested value
    :type v: numpy.float64
    :param arr: tested array
    :type arr: numpy.float64
:return:
:param:
:type: bool
"""
# print v, max(arr), min(arr)
if (v - max(arr) > ZERO_TOLERANCE) | (min(arr) - v > ZERO_TOLERANCE):
return False
else:
return True
def generate_least_number(l):
"""
Goes over integer in list and finds the
smallest integer not in the list.
:param l: the list
:return: int the smallest integer
"""
num = 0
while num in l:
num += 1
return num
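# e.g. generate_least_number([0, 1, 3]) returns 2 (made-up input).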
def keys_to_lowercase(d):
"""
Converts dictionary keys to lowercase
:param d the converted dictionary
:return: dnew
"""
dnew = {}
for key in d.keys():
keynew = key.lower()
dnew[keynew] = d[key]
return dnew
def parlist_to_list(l, property='value'):
"""
Converts a list of Parameter class to a
regular list - only the property is returned
:param l:
    :param property: the extracted property
:return:
"""
ol = []
for par in l:
ol.append(par[property])
return ol
def sum_dict_keys(d):
"""
Sums dictionary key records.
:param d: the dictionary
:return: s the sum
"""
s = 0.0
for key in d.keys():
s += d[key]
return s
def read_text_file(f):
"""
Reads ascii file f.
:param f: the file
:type f: str
:return lines: list of all lines within file f
:rtype: list
"""
ifile = open(f, 'r')
lines = ifile.readlines()
ifile.close()
return lines
def renew_file(f):
"""
    Empties (or creates) the file f.
:param f:
:return:
"""
ofile = open(f, 'w')
ofile.close()
def rotate_spectrum(wave, intens, vrot, epsilon=0.6, interpolate_back=True):
"""
    Rotates a spectrum represented by arrays wave and intens to the projected
rotational velocity vrot.
:param wave: wavelength array
:type wave: numpy.float64
:param intens: intensity array
:type intens: numpy.float64
:param vrot: projected rotational velocity in km/s
:type vrot: float
:param epsilon: Coefficient of linear limb-darkening.
:type epsilon: float
:param interpolate_back: interpolate the spectrum back to the original wavelength sampling
:type interpolate_back: bool
    :return intens: the rotated spectrum in the original wavelength sampling
:rtype intens: numpy.float64
:return intens_conv: the rotated spectrum equidistant in rv
:rtype intens_conv: numpy.float64
:return wave_conv: the wavelength array equidistant in rv
:rtype wave_conv: numpy.float64
"""
if vrot > ZERO_TOLERANCE:
# we need it equidistant in RV
wave_log = np.log(wave)
rv = np.linspace(wave_log[0], wave_log[-1], len(wave))
step = rv[1] - rv[0]
# interpolate
intens_rv = interpolate_spec(wave_log, intens, rv)
# scale rotational velocity with light speed
vrot = 1000 * vrot / c.value
# get the kernel
# velocity vector
n = int(np.ceil(2 * vrot / step))
rv_ker = np.arange(n) * step
rv_ker = rv_ker - rv_ker[-1] / 2.
y = 1 - (rv_ker / vrot) ** 2
# the kernel
kernel = (2 * (1 - epsilon) * np.sqrt(y) + np.pi * epsilon / 2. * y) / (np.pi * vrot * (1 - epsilon / 3.0))
kernel = kernel / kernel.sum()
# convolve the flux
intens_conv = fftconvolve(1 - intens_rv, kernel, mode='same')
if n % 2 == 1:
rv = np.arange(len(intens_conv)) * step + rv[0]
else:
rv = np.arange(len(intens_conv)) * step + rv[0] - step / 2.
wave_conv = np.exp(rv)
# interpolate back
if interpolate_back:
intens = interpolate_spec(wave_conv, 1 - intens_conv, wave)
return intens
else:
return 1 - intens_conv, wave_conv
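# A minimal usage sketch of rotate_spectrum (made-up value of v sin i): broaden
# a synthetic profile to a projected rotational velocity of 150 km/s with the
# default linear limb-darkening coefficient epsilon = 0.6.
# flux_rot = rotate_spectrum(wave, flux, 150.)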
def shift_spectrum(wave, RV):
"""
Doppler-shifts spectrum.
:param wave: original wavelength array
:type wave: numpy.float64
:param RV: radial velocity in km/s
:type RV: float
:return new_wave: shifted wavelength array
:rtype new_wave: numpy.float64
"""
# shifts the wavelengths
new_wave = wave * (1 + RV * 1000 / c.value)
return new_wave
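# A worked example of the shift (approximate numbers): for RV = +100 km/s the
# factor (1 + RV * 1000 / c) is about 1.0003336, so a line at 6562.80 A ends up
# at roughly 6564.99 A.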
def select_index_for_multiple_keywords(d, **kwargs):
"""
From a dictionary of lists selects
one index meeting all requirements.
:param kwargs:
:return:
"""
keys = d.keys()
length = len(d[keys[0]])
    for i in range(0, length):
        # the index is returned only if all the requirements are met
        if all([d[k][i] == kwargs[k] for k in keys]):
            return i
return -1
def string2bool(s):
"""
Converts string to boolean.
:param s:
:return:
"""
if s.lower() in ['true', '1']:
return True
else:
return False
def write_numpy(f, cols, fmt):
"""
An example of lack of brain of the main developer of this "code".
:param f: outputfile or handler
    :param cols: block of data to be written
    :param fmt: format of the block
:return: None
"""
np.savetxt(f, cols, fmt=fmt)
| 10,363 | 24.033816 | 115 | py |
pyterpol | pyterpol-master/synthetic/makespectrum.py | import os
import sys
import copy
import warnings
import numpy as np
import matplotlib.pyplot as plt
from astropy.constants import c
from auxiliary import is_within_interval
from auxiliary import instrumental_broadening
from auxiliary import interpolate_spec
from auxiliary import interpolate_block_faster
from auxiliary import read_text_file
from auxiliary import rotate_spectrum
from auxiliary import shift_spectrum
from auxiliary import ZERO_TOLERANCE
from defaults import default_grid_order
from defaults import gridDirectory
from defaults import grid_files
from defaults import gridListFile
from defaults import ABS_default_grid_order
from defaults import ABS_gridDirectory
from defaults import ABS_grid_files
from defaults import ABS_gridListFile
# CONSTANTS
class SyntheticSpectrum:
def __init__(self, f=None, wave=None, intens=None, do_not_load=False, **props):
"""
Reads the synthetic spectrum and its properties.
input:
f.. file with the spectrum
wave.. wavelength vector of the synthetic spectrum
intens.. intensity vector of the synthetic spectrum
do_not_load.. switch for cases, when we want to build the
class but do not want to load the spectrum.
**props.. properties of the spectrum, in the correct type
"""
# reads the spectrum
if f is not None:
# from file
            # this delays reading of the data
self.filename = f
if not do_not_load:
self.loaded = True
self.wave, self.intens = np.loadtxt(f, unpack=True, usecols=[0, 1])
self.measure_spectrum()
else:
self.loaded = False
else:
# wavelengths and intensities are given
self.wave = wave
self.intens = intens
self.measure_spectrum()
self.loaded = True
self.filename = None
# setups properties of the synthetic spectrum
self.properties = []
for key in props.keys():
setattr(self, key.lower(), props[key])
self.properties.append(key.lower())
def __getitem__(self, key):
"""
        Returns an attribute of the synthetic spectrum.
        Works only with properties.
        input:
            key.. searched attribute
        output:
            prop.. value of the attribute, if not present, False
"""
if not hasattr(self, key):
return False
else:
return getattr(self, key)
def __setitem__(self, key, value):
"""
Changes physical attribute. If it does not
exist, exception is raised.
input:
            key.. the attribute to be changed
            value.. new value of the attribute
"""
if not hasattr(self, key):
            raise AttributeError('The attribute %s does not exist.' % key)
else:
setattr(self, key, value)
def __str__(self):
"""
String representation.
"""
string = ""
# if taken from file, prints its name
if self.filename is not None:
string = string + "filename:%s " % (self.filename)
string = string + "loaded:%s " % (str(self.loaded))
# prints properties of the spectrum
for prop in self.properties:
string = string + "%s:%s " % (prop, str(self[prop]))
# get the wavelength boundaries
if self.loaded:
string += "(wmin, wmax): (%s, %s)" % (str(self.wmin), str(self.wmax))
string = string + '\n'
return string
def check_boundaries(self, wmin, wmax):
"""
        Checks that the given wavelengths do not
        extend beyond the synthetic spectrum.
        input:
            wmin = minimal wavelength
            wmax = maximal wavelength
"""
# lets have a special case, where the boundaries are None
if wmin is None:
wmin = self.wmin
if wmax is None:
wmax = self.wmax
if (wmin - (self.wmin - self.step) < ZERO_TOLERANCE) | \
(wmax - (self.wmax + self.step) > ZERO_TOLERANCE):
return False
else:
return True
def keys(self):
"""
Returns a list of properties.
"""
return self.properties
def load_spectrum(self, f=None):
"""
Loads the spectrum and stores it within the type.
input:
f.. filename
"""
if f is not None:
self.filename = f
# check if a binary representation exists -- then load it
binary_file = self.filename + '.npz'
if os.path.isfile(binary_file):
npz = np.load(binary_file, mmap_mode='r')
self.wave = npz['arr_0']
self.intens = npz['arr_1']
# otherwise, load ascii (very slow!) and save it as binary
else:
self.wave, self.intens = np.loadtxt(self.filename, unpack=True, usecols=[0, 1])
print("Saving binary file: " + str(binary_file))
np.savez(binary_file, self.wave, self.intens)
# measures the spectrum and marks it as loaded
self.measure_spectrum()
self.loaded = True
def measure_spectrum(self):
"""
Stores maximal, minimal wavelength and step within the type.
"""
# saves properties of synthetic
# spectra - min, max, step
self.wmin = self.wave.min()
self.wmax = self.wave.max()
self.step = self.wave[1] - self.wave[0]
def pad_continuum(self, wave, intens, bumpsize):
"""
Pads synthetic spectrum with continua at
each end.
input:
wave, intens.. the input spectrum
output:
bump_wave, bump_intens.. the 1-padded spectrum
"""
        # gets properties of the spectrum
w0 = wave[0]
wn = wave[-1]
step = wave[1] - wave[0]
# left bump
l_bump_wave = np.arange(w0 - bumpsize, w0, step)
        # right bump
r_bump_wave = np.arange(wn + step, wn + bumpsize, step)
# continuum - just ones
# l_cont = np.ones(len(l_bump_wave))
# r_cont = np.ones(len(r_bump_wave))
        # continuum - linear ramp from the edge intensity to one
l_cont = 1.0 - np.linspace(0, bumpsize, len(l_bump_wave)) * (1.0 - intens[0]) / bumpsize
r_cont = intens[-1] + np.linspace(0, bumpsize, len(r_bump_wave)) * (1.0 - intens[-1]) / bumpsize
# cretes empty arrays
total_length = len(l_bump_wave) + len(wave) + len(r_bump_wave)
bump_wave = np.zeros(total_length)
bump_intens = np.zeros(total_length)
# copy the bumpers and the spectra
imin = 0
imax = 0
for w, c in zip([l_bump_wave, wave, r_bump_wave], [l_cont, intens, r_cont]):
imax += len(w)
bump_wave[imin:imax] = w
bump_intens[imin:imax] = c
imin = imax
return bump_wave, bump_intens
def get_spectrum(self, wave=None, rv=None, vrot=None, lr=1.0, korel=False,
only_intensity=False, wmin=None, wmax=None, keep=False,
fwhm=None):
"""
        Returns the synthetic spectrum stored within the class. If
        a set of wavelengths is provided, an interpolated spectrum
        is returned.
        input:
          optional:
            wave.. array of desired wavelengths
            rv.. radial velocity in km/s
vrot.. projected rotational velocity in km/s
only_intensity.. returns intensity only
:param korel
:param keep
output:
wave, intens.. synthetic spectrum
"""
# checks that we do not pass negative values
if vrot is not None and vrot < 0.0:
warnings.warn('vrot cannot be negative! Setting to zero!')
vrot = 0.0
if wave is None:
# for some reason we want to work with the
# whole spectrum
# print wmin, wmax
if wmin is not None and wmax is not None:
wave, intens = self.select_interval(wmin, wmax)
else:
wave = self.wave
intens = self.intens
syn_wave = wave.copy()
# adds the instrumental broadening
if fwhm is not None and fwhm > ZERO_TOLERANCE:
intens = instrumental_broadening(syn_wave, intens, width=fwhm)
if vrot is not None and vrot > ZERO_TOLERANCE:
# rotates the spectrum
# print vrot
intens = rotate_spectrum(syn_wave, intens, vrot)
if rv is not None and abs(rv) > 0.0:
# if we want to shift it, we need to pad it,
# so it does not have to extrapolate
w0min = wave.min()
w0max = wave.max()
mins = np.array([w0min, w0max])
WAVE_BUMP = np.ceil(np.max(np.absolute(mins * (1 + 1000 * rv / c.value) - mins)))
syn_wave, intens = self.pad_continuum(syn_wave, intens, WAVE_BUMP)
# shift it in RV
syn_wave = shift_spectrum(syn_wave, rv)
            # the spectrum is scaled by the relative luminosity
if lr is not None and abs(lr - 1.0) > ZERO_TOLERANCE:
intens = intens*lr
if np.any([x != None for x in [rv, vrot]]):
# interpolates back
intens = interpolate_spec(syn_wave, intens, wave)
else:
# we are interpolating, so
# we check boundaries and we
# also add some more points
# at each end of the spectrum
# because we might want to
# operate with it a bit
# usually, if it does not fit,
# we can take a longer spectrum,
# so there is no point in padding the
            # spectrum
w0min = wave.min()
w0max = wave.max()
# the velocity shift rounded up
mins = np.array([w0min, w0max])
# Securing additional points on spectrum sides
# has sense only if we plan to shift it in RV
if rv is not None and abs(rv) > ZERO_TOLERANCE:
WAVE_BUMP = np.ceil(np.max(np.absolute(mins * (1 + 1000 * rv / c.value) - mins)))
else:
WAVE_BUMP = 0.0
wmin = w0min - WAVE_BUMP
wmax = w0max + WAVE_BUMP
# print wmin, wmax, self.wmin, self.wmax
if not self.check_boundaries(wmin, wmax):
                warnings.warn('Synthetic spectra do not cover the whole wavelength region;' \
                              ' extrapolation has to be employed and THAT IS DANGEROUS! Note that' \
                              ' each spectrum is extended by %f Angstrom at each side.' % (WAVE_BUMP))
# the part of the spectrum is selected
# there is no point in working with the
# whole dataset
# print wmin, wmax
# print len(self.wave)
syn_wave, intens = self.select_interval(wmin, wmax)
# print len(syn_wave), len(intens)
# adds the instrumental broadening
if fwhm is not None and fwhm > ZERO_TOLERANCE:
# intens = instrumental_broadening(syn_wave, intens, width=fwhm)
intens = instrumental_broadening(syn_wave, intens, width=fwhm)
# rotates the spectrum
if vrot is not None and vrot > ZERO_TOLERANCE:
# intens = rotate_spectrum(syn_wave, intens, vrot)
# print syn_wave
intens, syn_wave = rotate_spectrum(syn_wave, intens, vrot, interpolate_back=False)
# adjusts the spectrum for the radial velocity
if rv is not None and abs(rv) > ZERO_TOLERANCE:
syn_wave = shift_spectrum(syn_wave, rv)
            # the spectrum is scaled by the relative luminosity
if lr is not None and abs(lr - 1.0) > ZERO_TOLERANCE:
intens = intens*lr
# print len(syn_wave), len(wave)
# interpolates to the user specified wavelengths
intens = interpolate_spec(syn_wave, intens, wave)
# if we want to extract the spectra in KOREL format
if korel:
intens = 1.0 - (lr - intens)
# if we want to update the class with what
# we computed
if keep:
# update the size of the spectrum
self.intens = intens
self.wave = wave
self.measure_spectrum()
#update its parameters
for attr, val in zip(['rv', 'vrot', 'lr', 'korel'], [rv, vrot, lr, korel]):
if val is not None:
setattr(self, attr, val)
self.properties.append(attr)
return
if only_intensity:
return intens
else:
return wave, intens
def get_size(self):
"""
Gets the size of the spectrum i.e. wmin, wmax and step.
output:
props.. dictionary with records 'wmin', 'wmax', 'step'
"""
if self.loaded:
# guarantees fresh result
self.measure_spectrum()
return self.wmin, self.wmax, self.step
else:
raise Exception('Spectrum has not been loaded yet.')
def get_properties(self):
"""
Returns dictionary with the physical properties of the
synthetic spectrum.
Output:
props.. physical properties of the sythetic spectrum
"""
# return dictionary with the physical properties
props = {key: self[key] for key in self.properties}
return props
def plot(self, ax=None, savefig=False, figname=None, **kwargs):
"""
:param figname
:param savefig
:param ax: AxesSubplot
:param kwargs:
:return:
"""
w = self.wave
i = self.intens
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
props = str({prop: self[prop] for prop in self.properties})
ax.plot(w, i, label=props, **kwargs)
ax.set_xlim(self.wmin, self.wmax)
ax.set_ylim(0.95*i.min(), 1.05*i.max())
ax.set_xlabel('$\lambda(\AA)$')
ax.set_ylabel('$F_{\lambda}$(rel.)')
ax.legend(fontsize=10)
# save the figure
if savefig:
if figname is None:
figname = []
for key in self.properties:
# print key, self.properties
figname.extend([key, str(self[key])])
figname.extend(['wmin', str(self.wmin)])
figname.extend(['wmax', str(self.wmax)])
figname = '_'.join(figname) + '.png'
# save the plot
plt.savefig(figname)
def select_interval(self, wmin, wmax):
"""
Selects a spectral interval from the
synthetic spectrum.
:param wmin: minimal wavelength
:param wmax: maximal wavelength
:return wave: wavelength vector
:return intens: intensity vector
"""
# print wmin, wmax, self.wave
ind = np.where((self.wave >= wmin) & (self.wave <= wmax))[0]
# print ind
wave = self.wave[ind]
intens = self.intens[ind]
return wave, intens
def set_linear_wavelength(self, wmin, wmax, step):
"""
In case we want to attach linear wavelengths.
:param wmin
:param wmax
:param step
"""
self.wave = np.arange(wmin, wmax + step / 2., step)
def truncate_spectrum(self, wmin=None, wmax=None):
"""
Truncates the spectrum.
input:
wmin, wmax.. boundaries in wavelength
"""
if self.loaded is False:
raise Exception('The spectrum was not loaded.')
else:
# if there is a boundary missing
            # if a boundary is missing, take it from the spectrum itself
            if wmin is None:
                wmin = self.wave.min()
            if wmax is None:
                wmax = self.wave.max()
# checks that the interval (wmin, wmax) lies within the
# spectrum. If not exception is raised
if (self.wave.min() > wmin) | (self.wave.max() < wmax):
raise ValueError('The spectrum %s does not cover the whole spectral region <%s,%s>.' % \
(str(self).rstrip('\n'), str(wmin), str(wmax)))
            # does the truncation
ind = np.where((self.wave >= wmin) & (self.wave <= wmax))[0]
# ind = np.where(((self.wave - wmin) >= -ZERO_TOLERANCE) & ((self.wave - wmax) <= ZERO_TOLERANCE))[0]
self.wave = self.wave[ind]
self.intens = self.intens[ind]
def write_spectrum(self, filename='synspec.dat', fmt='%12.6f %12.8e', **kwargs):
"""
Writes the current synthetic spectrum.
:return:
"""
header = str(self.get_properties())
np.savetxt(filename, np.column_stack([self.wave, self.intens]), fmt=fmt, header=header)
class SyntheticGrid:
def __init__(self, mode='default', flux_type='relative', debug=False):
"""
Setup the grid.
input:
mode..
"""
# import from defaults
self.default_grid_order = default_grid_order
self.gridDirectory = gridDirectory
self.grid_files = grid_files
self.gridListFile = gridListFile
# Table containing list of the SyntheticSpectrum types
self.SyntheticSpectraList = []
# Table containing all eligible values
self.parameterList = []
self.columns = []
# grid preference order
self.gridOrder = None
# reads default grids
if mode.lower() != 'custom':
self.setup_defaults(mode, flux_type)
# updates debug mode
self.debug = debug
# Initializes array in which the wavelength
# vector is stored
self.wave = None
def __str__(self):
"""
String representation.
"""
string = "List of spectra:\n"
for rec in self.SyntheticSpectraList:
string = string + str(rec)
return string
def check_properties(self, **kwargs):
"""
Checks that at least some spectra have
"""
def clear_all(self):
"""
Empties SyntheticSpectraList
"""
self.SyntheticSpectraList = []
def deselect_exact(self, l, **props):
"""
        If an interpolated parameter hits a grid value exactly, keeps only the
        spectra at that value and drops the redundant ones.
:param l: list of spectra selected with self.select_parameters
:param props: values at which we interpolate
:return:
"""
l = np.array(l)
# print np.shape(l)[-1]
keys = props.keys()
# go column by column
for i in range(0, np.shape(l)[-1]):
v = props[keys[i]]
# print v, np.unique(l[:,i])
# deselects extact matches
if np.any(abs(np.unique(l[:,i]) - v) < ZERO_TOLERANCE):
ind = np.where(abs(l[:,i] - v) < ZERO_TOLERANCE)
l = l[ind]
return l
def get_synthetic_spectrum(self, params, wave, order=2, step=0.01, padding=20.0):
"""
Method which computes the interpolated spectrum
and wraps it within SyntheticSpectrum class. This
function should be accessed by the user.
input:
params.. dictionary containing values at which
we want to interpolate
            order.. number of spectra at which we are going
to interpolate, i.e. the order of the
fit is k = order-1 for order < 4 and
k = 3 for order > 4.
wave.. wavelength vector for which the synthetic
spectrum should be created.
"""
if isinstance(wave, (list, tuple)):
wave = np.array(wave)
# sets up the equidistant wavelength vector
wmin = wave.min() - padding
wmax = wave.max() + padding
# print wmin, wmax, step, order, padding
# overwrite the wave vector for
self.set_wavelength_vector(wmin, wmax, step)
# first of all we need to get list of parameters,
# which the program will interpolate in
parlist, vals, keys = self.select_and_verify_parameters(order=order, **params)
# second creates a list of the spectra used for interpolation
spectra = self.get_spectra_for_interpolation(parlist, keys, step=step,
wmin=wmin, wmax=wmax)
# interpolates the spectra
intens = self.interpolate_spectra(parlist, spectra, vals)
        # wraps the interpolated synthetic spectrum within the SyntheticSpectrum class
spectrum = SyntheticSpectrum(wave=self.wave.copy(), intens=intens, **params)
return spectrum
def get_all(self, **kwargs):
"""
        Returns all spectra whose properties are equal to the
        passed values, i.e. one can list all spectra that share
        a certain combination of parameter values.
input:
kwargs.. a dictionary of property = value
"""
# just in case we got empty dictionary
if len(kwargs.keys()) == 0:
return self.SyntheticSpectraList.copy()
# goes through each stored synthetic spectrum
spectra = []
for rec in self.SyntheticSpectraList:
# goes through passed kwargs
for i, key in enumerate(kwargs.keys()):
# print rec[key]
# print kwargs[key]
# keys agrees
if key.lower() in rec.keys():
# values are the same
if (abs(kwargs[key] - rec[key.lower()]) < ZERO_TOLERANCE):
# and we are checking the last record
if i == (len(kwargs.keys()) - 1):
spectra.append(rec)
else:
break
if len(spectra) == 0:
warnings.warn("No eligible spectrum was found! You probably got out of the grid.")
return spectra
def get_available_values(self, prop, **constraints):
"""
Lists all available properties in a sorted array.
input:
prop.. the searched property
grid.. list of SyntheticSpectrum types -
in case we want to search a narrowed
down list
"""
        # lets say we want to constrain the grid
if len(constraints.keys()) == 0:
grid = self.SyntheticSpectraList
else:
grid = self.get_all(**constraints)
# returns all eligible values
values = []
prop = prop.lower()
for rec in grid:
if prop in rec.keys() and rec[prop] not in values:
values.append(rec[prop])
return np.sort(values)
def get_available_values_fast(self, prop, **constraints):
"""
Returns possible values of a parameter.
"""
parLis = np.array(self.parameterList)
# print parLis
for key in constraints.keys():
# value
v = constraints[key]
# constraining column
col = self.columns.index(key)
ind = np.where(abs(parLis[:, col] - v) < ZERO_TOLERANCE)[0]
# narrow down the list
parLis = parLis[ind]
col = self.columns.index(prop.lower())
# return sorted list
return sorted(set(parLis[:, col]))
def get_spectra_for_interpolation(self, parList, header, step=0.01, wmin=None, wmax=None):
"""
Creates a list of spectra - physical spectra - this
will require some more tweaking. I would like to account
for:
1) spectrum is/is not loaded.
        2) spectrum should be somehow truncated -
        there is no need to load it all - maybe it
        will even be better to load only a small part.
This will be controlled through this method
which loads the data. Also some spectra
can be unloaded after several iterations.
input:
output:
"""
# empty list for the spectra
syntheticSpectra = []
# switch for spectra truncation
if (wmin is None) and (wmax is None):
truncateSpectrum = False
else:
truncateSpectrum = True
# go through each row of the parameter list
for i, row in enumerate(parList):
# retrieve spectrum
props = {prop: row[j] for j, prop in enumerate(header)}
spectrum = self.get_all(**props)
# if there are two or more spectra for the
            # same parameters
if len(spectrum) > 1:
spectrum = self.resolve_degeneracy(spectrum)
else:
spectrum = spectrum[0]
# load the spectrum if not
# check that spectrum is loaded and that its fitting within boundaries
if (not spectrum.loaded):
                print "Loading spectrum: %s" % (str(spectrum).rstrip('\n'))
# loads the spectrum
spectrum.load_spectrum()
# truncates the loaded spectrum
if truncateSpectrum:
if self.debug:
print "Truncating spectrum to: (%f,%f)" % (wmin, wmax)
spectrum.truncate_spectrum(wmin, wmax)
else:
# check that the synthetic spectrum has sufficient size
# if not reaload it
if not spectrum.check_boundaries(wmin, wmax):
spectrum.load_spectrum()
# truncates the re-loaded spectrum
if truncateSpectrum:
if self.debug:
print "Truncating spectrum to: (%f,%f)" % (wmin, wmax)
spectrum.truncate_spectrum(wmin, wmax)
if self.debug:
print "Spectrum loaded: %s" % (str(spectrum).rstrip('\n'))
            # We have to be sure that all spectra share exactly
            # the same wavelength scale
swmin, swmax, sstep = spectrum.get_size()
if np.any(np.abs([swmin - wmin, swmax - wmax, sstep - step]) > ZERO_TOLERANCE):
if self.debug:
print "Spectrum %s does not have the wavelength scale (wmin, wmax,step)=(%s, %s, %s)" % \
(str(spectrum).rstrip('\n'), str(wmin), str(wmax), str(step))
# if they do not agree - we have to interpolate
                # it is crucial that all spectra have the same
# wavelength scale
                if self.wave is None:
wave = np.arange(wmin, wmax + step / 2., step)
else:
wave = self.wave
# interpolate the spectrum to the wavelength scale
intens = spectrum.get_spectrum(wave=wave, only_intensity=True)
else:
if self.debug:
print "Wavelenght scale of spectrum: %s is (wmin, wmax,step)=(%s, %s, %s)." % \
(str(spectrum).rstrip('\n'), str(wmin), str(wmax), str(step))
# read out the intensities
intens = spectrum.get_spectrum(only_intensity=True)
# print len(intens)
# append spectrum to the list
syntheticSpectra.append(intens)
return syntheticSpectra
def interpolate_spectra(self, parList, synspectra, parameters):
"""
Interpolates in all parameters.
input:
parlist.. list generated with select parameters method
synspectra.. list generated with the get_spectra_for_interpolation
method
parameters.. list of parameter values in which we interpolate
the order must be the same as in case of parlist
                    this is guaranteed by the output of the select_and_verify_parameters method
output:
intens.. the resulting array of intensities
"""
# convert to arrays, easier to handle
plist = np.array(parList)
syns = np.array(synspectra)
ncol = len(plist[0])
pars = parameters
while ncol > 0:
# extract new value
xnew = pars[ncol - 1]
# print xnew
new_plist = []
new_syns = []
# take the first row
j = 0
while j < len(plist):
row = plist[j]
# narrow it down - all values
# that have the first ncol-1
# values the same are chosen
t_plist = plist.copy()
t_syns = syns.copy()
for i in range(ncol - 1):
ind = np.where(abs(t_plist[:, i] - row[i]) < ZERO_TOLERANCE)[0]
t_plist = t_plist[ind]
t_syns = t_syns[ind]
# if there is really nothing to interpolate in
# the one value is copied and we proceed to next
# step
if len(t_plist) == 1:
if self.debug:
print "Skipping interpolation in %s - there is only one spectrum for values %s." % \
(str(xnew), str(t_plist[:, :ncol - 1]))
intens = t_syns[0]
new_plist.append(row[:ncol - 1])
new_syns.append(intens)
j += len(ind)
continue
# sort according to the last columns
ind = np.argsort(t_plist[:, ncol - 1])
# extract the abscissa
x = t_plist[ind, ncol - 1]
t_syns = t_syns[ind]
if self.debug:
print "Interpolating in vector: %s at value %s." % (str(x), xnew)
# everything is sorted, we can interpolate
# unless our value is exact ofc.
intens = interpolate_block_faster(x, t_syns, xnew)
# add it to new plists and syns
new_plist.append(row[:ncol - 1])
new_syns.append(intens)
j += len(ind)
syns = np.array(new_syns)
plist = np.array(new_plist)
ncol = len(plist[0])
return syns[0]
    def list_modes(self):
"""
This method lists available modes for the SyntheticGrid.
:return:
"""
# go over differents modes
string = 'List of registered modes and their properties follows:\n'
for i in range(0, len(self.grid_files['identification'])):
string += ''.ljust(100,'=') + '\n'
string += 'mode: %s:\n' % self.grid_files['identification'][i]
string += 'directories: %s \n' % str(self.grid_files['directories'][i])
string += 'columns: %s\n' % str(self.grid_files['columns'][i])
string += 'families: %s\n' % str(self.grid_files['families'][i])
string += ''.ljust(100,'=') + '\n'
return string
def narrow_down_grid(self, **kwargs):
"""
To speed up computations, one can
narrow down the grid, to certain
family, or parameter range.
input:
One can either fix a parameter:
par = value
or fix an interval:
par = (vmin, vmax)
output:
list of synthetic spectra
"""
# separate fixed from free
fixed = {}
free = {}
for key in kwargs.keys():
if not isinstance(kwargs[key], (tuple, list)):
fixed[key] = kwargs[key]
else:
free[key] = kwargs[key]
# first narrow down the fixed ones
grid = self.get_all(**fixed)
# if there are no other restrictions -
# this option is covered with get_all
# method ofc.
if len(free.keys()) == 0:
return grid
else:
narrowed_grid = []
for rec in grid:
for i, key in enumerate(free.keys()):
if key.lower() in rec.keys():
if (rec[key.lower()] >= free[key][0]) & \
(rec[key.lower()] <= free[key][1]):
# all keys must agree
if i == len(free.keys()) - 1:
narrowed_grid.append(rec)
# if we narrowed it down to zero
if len(narrowed_grid) == 0:
warnings.warn("The narrowed-down grid is empty!!")
return narrowed_grid
def read_list_from_file(self, f, columns, directory=None, family=None):
"""
Reads list of grid spectra from a file.
input:
f.. file containing the records
columns.. column description
            family.. family (grid name), if provided
directory.. directory where the files are stored
this option should be used in case
when the path to the spectra = filename
is relative only within the file
"""
# read from text_file
if columns is None:
raise KeyError('Description of the input file was not specified.')
        # There are two mandatory records - 1) path
        # to the synthetic spectrum and 2) family to
        # which the spectrum belongs. By family I mean
        # a published grid. In this family, there should
        # not exist 2 spectra with the same properties.
        # Missing filename will raise an error,
        # missing family will raise a warning, because
        # all read spectra will be assigned the same family
hasFamily = False
if family is None:
for rec in columns:
if rec.upper() == 'FAMILY':
hasFamily = True
addFamily = False
else:
hasFamily = True
addFamily = True
if not hasFamily:
warnings.warn("The family (aka the grid) of spectra was not specified. Assigning family...")
families = self.get_available_values('FAMILY')
family = 'family' + str(len(families))
addFamily = True
# WE CHECK THAT THERE IS FILENAME RECORD
        # FOR EACH SPECTRUM - WITHOUT THAT WE WON'T
        # COMPUTE ANYTHING
hasFilename = False
for rec in columns:
if rec.upper() == 'FILENAME':
hasFilename = True
if not hasFilename:
raise KeyError('Record filename = path to the spectrum is missing in the column description!')
lines = read_text_file(f)
# go through file, line by line
for j, line in enumerate(lines):
# store one line = one spectrum info
rec = {}
data = line.split()
# make sure, we have description of all
# properties
if len(data) > len(columns):
raise KeyError('Description of some columns is missing.')
for i, col in enumerate(columns):
# the name should be the only string
if col.upper() in ['FAMILY']:
rec[col.upper()] = data[i]
elif col.upper() in ['FILENAME']:
rec[col.upper()] = os.path.join(directory, data[i])
else:
rec[col.upper()] = float(data[i])
# Adds family if needed
if addFamily:
rec['FAMILY'] = family
filename = rec.pop('FILENAME')
synspec = SyntheticSpectrum(f=filename, do_not_load=True, **rec)
            # Adds the record to the synthetic spectra list
self.SyntheticSpectraList.append(synspec)
# Adds the record to the parameterList - so without family
rec.pop('FAMILY')
phys_cols = [x.lower() for x in columns if x not in ['FAMILY', 'FILENAME']]
self.parameterList.append([rec[col.upper()] for col in phys_cols])
# also stores identification of columns
if j == 0:
self.columns = phys_cols
def resolve_degeneracy(self, speclist):
"""
If there are more spectra having the same
        parameters, one has to choose the preferred one.
input:
speclist.. list of SyntheticSpectrum types corresponding to same properties
"""
# if we did not set up the order -> error
if self.gridOrder is None:
            raise KeyError('There are several spectra for the same parameters.'
                           ' I think it is because we have more grids that overlap.'
                           ' You can overcome this by setting the gridOrder variable.')
indices = []
for i in range(0, len(speclist)):
# print speclist[i]['family']
indices.append(self.gridOrder.index(speclist[i]['family']))
        # just in case there was something peculiar
if np.any(indices < -1):
warnings.warn('At least one grid was not found in the gridOrder variable.'
' Verify that the names set in gridOrder agree with family names of spectra.')
# return spectrum with the smallest index
return speclist[np.argmin(indices)]
def select_and_verify_parameters(self, order=2, **props):
"""
A wrapper to the select_parameters method.
This method can deal with overlaps of grids.
        But since it does not know the grid a priori,
        it is unable to recognize a wrong result.
This wrapper checks the result.
input:
order.. maximal number of interpolated spectra
props.. dictionary of interpolated parameters
output:
parlist.. each row represents one spectrum
which is needed to interpolate in
                    the given props
            vals..    values in which we interpolate
            keys..    names of the interpolated parameters
"""
# all parameters are defined lowercase
# so we have to convert it
for key in props.keys():
v = props.pop(key)
props[key.lower()] = v
if self.debug:
print "In select_and_verify_parameters: order=%i properties:" % (order)
print str(props)
# keys and values
keys = props.keys()
vals = [props[key] for key in props.keys()]
# gets the parameter list
# print order, props
parlist = self.select_parameters(order=order,**props)
# print parlist
        # deselect redundant spectra
parlist = self.deselect_exact(parlist, **props)
if len(parlist) == 0:
raise Exception('Do %s lie within the grid? I do not think so...' % (str(props)))
if self.debug:
print 'Following parameters were chosen with select_parameters method:'
for row in parlist:
print row
# checks the result
temp = np.array(parlist)
# print temp, vals
for i, val in enumerate(vals):
# print val, temp[:, i], is_within_interval(val, temp[:, i])
if not is_within_interval(val, temp[:, i]):
raise ValueError('Parameters %s lie outside the grid.' % (str(props)))
return parlist, vals, keys
def select_parameters(self, values=[], order=2, constraints={}, **props):
"""
        Creates a final list - this is still a
        first guess. I think that looking up
        eligible values and spectra can be done
        better.
        input:
            grid - synthetic spectraList, which is searched
            order - how many spectra are used; this is
                adjusted dynamically if there are not
                enough values
constraints - resolve conflicts between grids
props - properties in which we fit
output:
values of spectra for interpolation
"""
# extract the parameter and its values
key = props.keys()[0].lower()
v = props.pop(key)
# print key, constraints, props
# list eligible values for a given parameter
elig_vals = np.array(self.get_available_values_fast(key, **constraints))
# print key, elig_vals
# sorts the grid, from nearest to farthest
ind = np.argsort(abs(elig_vals - v))
vals = elig_vals[ind]
# equality check
# print vals, v, key
# what if the grid step is inhomogeneous? - actually it is
# in z - what shall we do, what shall we do???
if vals[:order].min() > v or vals[:order].max() < v:
# TODO think of something better than this!!!!!!
try:
lower = np.max(vals[np.where(vals - v < ZERO_TOLERANCE)[0]])
upper = np.min(vals[np.where(vals - v > ZERO_TOLERANCE)[0]])
vals = np.array([lower, upper])
except:
pass
# print lower, upper, vals
# checks that there is not equality
# if np.any(abs(vals - v) < ZERO_TOLERANCE):
# ind = np.argmin(abs(vals - v))
# vals = [vals[ind]]
#
# if self.debug:
# print "%s=%s is precise. Skipping choice of parameters." % (key, str(v))
# if the eligible values do not surround the parameter
if not is_within_interval(v, vals):
return values
# if there are no other spectra to interpolate in
if len(props.keys()) == 0:
for i in range(0, len(vals)):
row = []
# append those that are already fixed
for key in constraints.keys():
row.append(constraints[key])
# append the last parameter
row.append(vals[i])
# append the row
values.append(row)
# once 'order' spectra are appended, we can
# end
if i == order - 1:
break
return values
else:
j = 0
for i in range(0, len(vals)):
# add a constraint
constraints[key] = vals[i]
# recursively calls the function
values_new = self.select_parameters(values=copy.deepcopy(values), order=order, constraints=constraints,
**props)
# some searches are in vain - so we
# wait until meaningful calls accumulate
if len(values_new) > len(values):
j += 1
                # copies the result, so we can go on
values = values_new
# remove constraint
constraints.pop(key)
if j == order:
break
return values
def setup_defaults(self, mode, flux_type):
"""
Given a key loads a grid stored within
the directory.
input:
            mode.. one of the default modes OSTAR, BSTAR, POLLUX, AMBRE;
                    default = all
flux_type.. either relative or absolute
"""
# we do not want to bother with the case
mode = mode.upper()
flux_type = flux_type.upper()
# select the correct type of flux
# note we cannot overwrite globals, but only class variables
if flux_type == 'ABSOLUTE':
self.grid_files = ABS_grid_files
self.gridDirectory = ABS_gridDirectory
self.gridListFile = ABS_gridListFile
self.default_grid_order = ABS_default_grid_order
# select properties
ind = self.grid_files['identification'].index(mode)
if ind < 0:
raise ValueError('Default settings named %s not found.' % (mode))
dirs = self.grid_files['directories'][ind]
cols = self.grid_files['columns'][ind]
fams = self.grid_files['families'][ind]
# reads the grid files
for i, d in enumerate(dirs):
spectralist = os.path.join(self.gridDirectory, d, self.gridListFile)
directory = os.path.join(self.gridDirectory, d)
self.read_list_from_file(spectralist, cols, family=fams[i], directory=directory)
# also sets the default grid order
self.set_grid_order(self.default_grid_order)
def set_mode(self, mode='default'):
"""
Set different mode.
:param mode:
:return:
"""
debug = self.debug
self.__init__(mode=mode, debug=debug)
def set_grid_order(self, arr):
"""
Sets grid preference.
input:
arr = list of spectra grid1 > grid2 > grid3...
"""
self.gridOrder = arr
def set_wavelength_vector(self, wmin, wmax, step):
"""
Store the wavelength vector within the class.
input:
wmin.. minimal wavelength
wmax.. maximal wavelength
step.. step size in the wavelength
"""
nstep = int((wmax - wmin)/step)+1
self.wave = np.linspace(wmin, wmax, nstep)
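# A minimal usage sketch (added for illustration only, not part of the original
# module). It uses only the SyntheticSpectrum class defined above and in-memory
# arrays, so no grid files are needed.
def _example_synthetic_spectrum():
    """
    Illustrative only: builds a SyntheticSpectrum from arrays and retrieves
    a rotationally broadened, Doppler-shifted copy via get_spectrum.
    """
    # a fake flat continuum with one Gaussian absorption line
    wave = np.arange(6500., 6600., 0.01)
    intens = 1.0 - 0.5 * np.exp(-0.5 * ((wave - 6550.) / 0.5) ** 2)
    spectrum = SyntheticSpectrum(wave=wave, intens=intens, teff=18000., logg=4.2)
    # broaden by vrot = 50 km/s and shift by rv = 30 km/s
    w, i = spectrum.get_spectrum(rv=30., vrot=50.)
    return w, i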
| 45,773 | 34.319444 | 119 | py |
pyterpol | pyterpol-master/synthetic/defaults.py | # defaults settings - for more utility, this was transfered
# to init
import os, inspect
curdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# DEFINITIONS OF GRIDS OF RELATIVE SPECTRA
gridDirectory = os.path.join("/".join(curdir.split('/')[:-1]), 'grids')
# name of the file containing records on synthetic spectra
gridListFile = 'gridlist'
grid_files = dict(
identification=['DEFAULT', 'OSTAR', 'BSTAR', 'POLLUX', 'AMBRE'],
directories=[
['OSTAR_Z_0.5', 'OSTAR_Z_1.0', 'OSTAR_Z_2.0', 'BSTAR_Z_0.5', 'BSTAR_Z_1.0', 'BSTAR_Z_2.0', 'POLLUX_Z_1.0',
'AMBRE_Z_1.0'],
['OSTAR_Z_0.5', 'OSTAR_Z_1.0', 'OSTAR_Z_2.0'],
['BSTAR_Z_0.5', 'BSTAR_Z_1.0', 'BSTAR_Z_2.0'],
['POLLUX_Z_1.0'],
['AMBRE_Z_1.0']
],
columns=[['FILENAME', 'TEFF', 'LOGG', 'Z'],
['FILENAME', 'TEFF', 'LOGG', 'Z'],
['FILENAME', 'TEFF', 'LOGG', 'Z'],
['FILENAME', 'TEFF', 'LOGG', 'Z'],
['FILENAME', 'TEFF', 'LOGG', 'Z']
],
families=[['OSTAR', 'OSTAR', 'OSTAR', 'BSTAR', 'BSTAR', 'BSTAR', 'POLLUX', 'AMBRE'],
['OSTAR', 'OSTAR', 'OSTAR'],
['BSTAR', 'BSTAR', 'BSTAR'],
['POLLUX'],
['AMBRE']
]
)
# stores default grid order
default_grid_order = ['BSTAR', 'OSTAR', 'AMBRE', 'POLLUX']
# DEFINITIONS OF GRIDS OF ABSOLUTE SPECTRA
ABS_gridDirectory = os.path.join("/".join(curdir.split('/')[:-1]), 'grids_ABS')
# name of the file containing records on synthetic spectra
ABS_gridListFile = 'gridlist'
# POLLUX has a too narrow wavelength range => it was deleted
ABS_grid_files = dict(
identification=['DEFAULT', 'PHOENIX', 'BSTAR'],
directories=[
['OSTAR_Z_1.0', 'BSTAR_Z_1.0', 'POLLUX_Z_1.0'],
['BSTAR_Z_1.0'],
['PHOENIX_Z_1.0'],
],
columns=[
['FILENAME', 'TEFF', 'LOGG', 'Z'],
['FILENAME', 'TEFF', 'LOGG', 'Z'],
['FILENAME', 'TEFF', 'LOGG', 'Z'],
],
families=[
['OSTAR', 'BSTAR', 'POLLUX'],
['BSTAR'],
['PHOENIX'],
]
)
# stores default grid order
ABS_default_grid_order = ['BSTAR', 'OSTAR', 'POLLUX', 'PHOENIX']
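# A small lookup sketch (added for illustration only). It mirrors the way
# SyntheticGrid.setup_defaults indexes the tables above, but is not used anywhere.
def _example_mode_lookup(mode='BSTAR'):
    """
    Illustrative only: returns the directories, columns and families
    registered for a given mode in the relative-spectra table above.
    """
    ind = grid_files['identification'].index(mode)
    return grid_files['directories'][ind], grid_files['columns'][ind], grid_files['families'][ind]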
| 2,466 | 34.242857 | 120 | py |
pyterpol | pyterpol-master/plotting/plotting.py | import copy
import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
import numpy as np
from scipy.stats import norm
from pyterpol.synthetic.auxiliary import read_text_file
def get_walker(db, nchain, nwalker, niter):
"""
Retrieves a walker from the chain.
:param db:
:param nchain:
:param nwalker:
:param niter:
:return:
"""
rows = np.arange(niter)
rows = nchain + nwalker*rows
return db[rows]
def plot_walkers_for_one_param(db, ipar, nwalker, niter, ax):
"""
:param db:
:param ipar:
:param nwalker:
:param niter:
:param ax:
:return:
"""
    # set the iterations
iters = np.arange(niter)
# plot each walker
for i in range(0, nwalker):
w = get_walker(db, i, nwalker, niter)
ax.plot(iters, w[:,ipar], '-')
def plot_walkers(block, niter, nwalker, indices=None, labels=None, savefig=True, figname=None):
"""
:param block:
:param indices:
:param niter:
:param nwalker:
:param labels:
:param savefig:
:param figname:
:return:
"""
if figname is not None:
savefig = True
# define which parameters are plotted
if indices is None:
indices = np.arange(len(block[0]))
npar = len(indices)
    # define the plotting grid
ncol = 3
nrow = npar / ncol
if npar % ncol > 0:
nrow += 1
# create the grid and the figure
gs1 = gs.GridSpec(nrow, ncol, hspace=0.2, wspace=0.4)
fig = plt.figure(figsize=(4*ncol, 3*nrow), dpi=100)
# plot each figure
for j, ind in enumerate(indices):
# set label
if labels is None:
label = 'p' + str(ind).zfill(2)
else:
label = labels[j]
# set the position
icol = j % ncol
irow = j / ncol
ax = fig.add_subplot(gs1[irow, icol])
# plot the walkers
plot_walkers_for_one_param(block, ind, nwalker, niter, ax)
ax.set_xlabel('Iteration number', fontsize=8)
ax.set_ylabel(label, fontsize=8)
# save the figure
if savefig:
if figname is None:
figname = 'mcmc_convergence.png'
# plt.tight_layout()
plt.savefig(figname)
def plot_convergence(block, labels=None, relative=True, savefig=True, figname=None):
"""
Plots convergence of the chi^2 and of individual parameters.
:param block:
:param labels:
:param relative:
:param savefig
:param figname
:return:
"""
nrow, ncol = np.shape(block)
# normalize with the best value
    # if relative is passed
if relative:
rel_block = copy.deepcopy(block)
for i in range(0, ncol):
rel_block[:,i] = block[:, i]/block[-1, i]
# start a new figure
fig = plt.figure(dpi=100, figsize=(15, 10))
ax = fig.add_subplot(111)
# plot convergence
for i in range(0, ncol):
# define the color
color = 0.1 +0.9*np.random.random(3)
if labels is not None:
ax.plot(rel_block[:,i], '-', color=color, label=labels[i])
else:
ax.plot(rel_block[:,i], '-', color=color)
ax.set_xlabel('Iteration number')
ax.set_ylabel('Relative value.')
ax.legend(loc=1, fontsize=10)
# save the plot
if savefig == True:
if figname is None:
figname = 'convergence.png'
plt.savefig(figname)
# try to produce another kind of plot
if ncol % 2 > 0:
nfigrow = ncol / 2 + 1
else:
nfigrow = ncol / 2
# setup the grid
gs1 = gs.GridSpec(nfigrow, 2, hspace=0.5)
# setup the figure
fig2 = plt.figure(dpi=100, figsize=(10, 3*nfigrow))
# plot convergence of individual parameters
for i in range(0, ncol):
ax = fig2.add_subplot(gs1[i/2, i%2])
ax.set_xlabel('Iteration number')
ax.set_ylabel('Value')
ax.set_ylabel(labels[i], fontsize=8)
ax.plot(block[:, i], 'k-', label=labels[i])
# ax.legend(loc=1)
# save the figure
fig2.savefig('convergence_2.png')
plt.close()
def plot_chi2_map(x, y, nbin=10, labels=None, savefig=True, figname=None):
"""
Plots a covariance map.
:param x parameter values
:param y parameter values
:param nbin number of bins in a histogram
:param labels
:param savefig
:param figname
:return:
"""
fs=8
# if user did not pass the labels
if labels == None:
labels = ['x', 'y']
# set up the figure
fig = plt.figure(figsize=(10,10), dpi=100)
var_axes = [221, 224]
var_data = [x, y]
    # first the plot of the variance
for i in range(0, 2):
ax = fig.add_subplot(var_axes[i])
# plot the histogram
        n, bins, patches = ax.hist(var_data[i], nbin, normed=True, label=labels[i])
x_g = np.linspace(bins.min(), bins.max(), 50)
# plot the gaussian 'fit'
mean = var_data[i].mean()
var = var_data[i].std(ddof=1)
g = norm(loc=mean, scale=var)
ax.plot(x_g, g.pdf(x_g), 'r-')
# labeling
ax.set_xlabel(labels[i], fontsize=8)
ax.set_ylabel('$n_i/N$', fontsize=8)
ax.set_title(r'$\sigma$_%s=%.3f' % (labels[i], var), fontsize=8)
# plot the 2d chi2 map
ax = fig.add_subplot(223)
ax.hist2d(x, y, nbin, normed=True)
# compute the correlation
cov = ((x-x.mean())*(y-y.mean())).mean()
cor = cov/(x.std(ddof=1)*y.std(ddof=1))
# labelling
ax.set_xlabel(labels[0], fontsize=8)
ax.set_ylabel(labels[1], fontsize=8)
ax.set_title(r'$\rho$(%s, %s) = %.3f' % (labels[0], labels[1], cor), fontsize=8)
# save the figure
if savefig:
if figname is None:
figname = '_'.join(labels) + '.png'
plt.savefig(figname)
plt.close()
def plot_variance(x, nbin=10, label=None, savefig=True, figname=None):
"""
Plots a covariance map.
:param x parameter values
:param nbin number of bins in a histogram
:param labels
:param savefig
:param figname
:return:
"""
fs=8
# if user did not pass the labels
if label is None:
label = 'x'
# set up the figure
fig = plt.figure(figsize=(6,6), dpi=100)
    # first the plot of the variance
ax = fig.add_subplot(111)
# plot the histogram
n, bins, patches = ax.hist(x, nbin, normed=True, label=label)
x_g = np.linspace(bins.min(), bins.max(), 50)
# plot the gaussian 'fit'
mean = x.mean()
var = x.std(ddof=1)
g = norm(loc=mean, scale=var)
ax.plot(x_g, g.pdf(x_g), 'r-')
# labeling
ax.set_xlabel(label, fontsize=8)
ax.set_ylabel('$n_i/N$', fontsize=8)
ax.set_title(r'$\sigma$_%s=%.3f' % (label, var), fontsize=8)
ax.legend(fontsize=8)
# save the figure
if savefig:
if figname is None:
figname = label + '.png'
else:
figname += label+'.png'
plt.savefig(figname)
plt.close()
def read_fitlog(f):
"""
Reads the fitting log and stores it within a dictionary.
:param f:
:return:
"""
# read the file
lines = read_text_file(f)
    # key counter and output dictionary
fitlog = {}
hkcounter = 0
# define header keys
head_keys = ['name', 'component', 'group']
for l in lines:
d = l.split()
# print d
for hk in head_keys:
if l.find(hk) > -1:
# groups are integers of course
if hk == 'group':
d[2:] = map(int, d[2:])
else:
d[2:] = d[2:]
# append the header info
fitlog[hk] = d[2:]
hkcounter += 1
break
# once we read all data, we end
if hkcounter == 3:
break
# print fitlog
# append data
fitlog['data'] = np.loadtxt(f)
return fitlog
def read_mc_chain(f):
"""
Reads the mcmc chain created with emcee
:param f: chain_file
:return:
"""
# read the file
lines = read_text_file(f)
# key counter and ouput dictionary
chainlog = {}
hkcounter = 0
# define header keys
head_keys = ['name', 'component', 'group']
for l in lines:
d = l.split()
# print d
for hk in head_keys:
if l.find(hk) > -1:
# groups are integers of course
if hk == 'group':
d[2:] = map(int, d[2:])
else:
d[2:] = d[2:]
# append the header info
chainlog[hk] = d[2:]
hkcounter += 1
break
# once we read all data, we end
if hkcounter == 3:
break
# load the file
d = np.loadtxt(f)
# get fit properties
nwalkers = int(np.max(d[:, 0])) + 1
niter = len(d[:, 0]) / nwalkers
npars = len(d[0]) - 2
# remove the first column with numbering
chainlog['data'] = d[:, 1:]
return chainlog, nwalkers, niter, npars
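# A minimal sketch of the chain layout assumed by get_walker (added for
# illustration only; the fake chain below is made up, not real data).
def _example_chain_layout():
    """
    Illustrative only: iterations of the walkers are interleaved, so walker k
    occupies rows k, k + nwalker, k + 2*nwalker, ... of the chain block.
    """
    nwalker, niter = 4, 3
    # fake chain: each row stores (iteration, walker) so the layout is visible
    db = np.array([[i, k] for i in range(niter) for k in range(nwalker)], dtype=float)
    # for walker 2 the second column is all 2s and the first column runs 0, 1, 2
    return get_walker(db, 2, nwalker, niter)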
| 9,041 | 22.42487 | 95 | py |
pyterpol | pyterpol-master/pyterpol_examples/Interface/output/example.py | """
This tutorial serves as demonstration of how to fit observed
spectra with Pyterpol.
Our observed spectra were created with the old C++ version of the
code. We have three spectra of a binary consisting of
primary: teff = 25000, g = 4.2, vrot = 150, lr = 0.7, z = 1.0
secondary: teff = 18000, g = 4.2, vrot = 50, lr = 0.3, z = 1.0
and various radial velocities. They look as if they were
observed spectra.
We have successfully fitted the data with the differential
evolution algorithm from the SciPy library. Our next step is
to get the output from fitting.
"""
import pyterpol
# First load the session
itf = pyterpol.Interface.load('fitted.itf')
# check that everything has loaded correctly
print itf
"""
==============================================StarList==============================================
Component: primary
name: rv value: 49.9857247022 vmin: -120.0 vmax: 120.0 fitted: True group: 1 _typedef: None
name: rv value: 19.9864936135 vmin: -120.0 vmax: 120.0 fitted: True group: 2 _typedef: None
name: rv value: 100.009478284 vmin: -120.0 vmax: 120.0 fitted: True group: 3 _typedef: None
name: teff value: 25000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: None
name: vrot value: 150.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: None
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: None
name: lr value: 0.7 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: None
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: None
Component: secondary
name: rv value: -49.9460982465 vmin: -120.0 vmax: 120.0 fitted: True group: 1 _typedef: None
name: rv value: -19.9589330606 vmin: -120.0 vmax: 120.0 fitted: True group: 2 _typedef: None
name: rv value: -99.9753261321 vmin: -120.0 vmax: 120.0 fitted: True group: 3 _typedef: None
name: teff value: 18000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: None
name: vrot value: 50.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: None
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: None
name: lr value: 0.3 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: None
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: None
=============================================RegionList=============================================
Region name: region00: (wmin, wmax) = (6330.0, 6375.0):
component: all groups: {'lr': 0}
Region name: region01: (wmin, wmax) = (6500.0, 6600.0):
component: all groups: {'lr': 0}
============================================ObservedList============================================
List of all attached spectra:
filename: a component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 1} (min, max): (6250.0, 6799.9499999999998)
filename: b component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 2} (min, max): (6250.0, 6799.9499999999998)
filename: c component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 3} (min, max): (6250.0, 6799.9499999999998)
===============================================Fitter===============================================
Fitter: sp_diff_evol optional_arguments: {}
Initial parameters:
====================================================================================================
"""
# write a dictionary of parameters and their errors
itf.write_fitted_parameters(outputname='result.dat')
"""
c: primary p: rv g: 1 value: 49.9857 lower: -0.1070 upper: 0.1117
c: primary p: rv g: 2 value: 19.9865 lower: -0.1184 upper: 0.0943
c: primary p: rv g: 3 value: 100.0095 lower: -0.0921 upper: 0.1048
c: secondary p: rv g: 1 value: -49.9461 lower: -0.0866 upper: 0.1056
c: secondary p: rv g: 2 value: -19.9589 lower: -0.1161 upper: 0.0974
c: secondary p: rv g: 3 value: -99.9753 lower: -0.0940 upper: 0.1116
"""
# first we would like to see what our comparisons look like
# naming the figures using 'figname' is not mandatory, but
# it is advised.
itf.plot_all_comparisons()
# we may want to export the synthetic spectra
# we can write one component in one region -
# this will export a synthetic spectrum for each
# rv_group
itf.write_synthetic_spectra(component='primary', region='region00', outputname='primary')
# or we can write everything together
itf.write_synthetic_spectra()
# convergence can be plotted - by default chi^2.
itf.plot_convergence(figname='convergence_chi.png')
# interface plots the data from the fit log, so it is
# better to save it - also even if our model/fitlog changed
# we can still plot the convergence, stored within a fitlog
itf.plot_convergence(parameter='all', f='fit.log', figname='convergence_parameters.png')
# and we can also plot covariance, which will tell us
# what is the uncertainty of the fit - we are interested in rv
# This will plot covariances between rvs for group 1s
itf.plot_covariances(parameters=['rv'], groups=[1], figname='rv_g_1')
# Again it is not necessary to use the registered fitlog
itf.plot_covariances(f='fit.log', parameters=['rv'], groups=[2], figname='rv_g_2')
| 5,219 | 47.333333 | 146 | py |
pyterpol | pyterpol-master/pyterpol_examples/Interface/setup/example.py | """
This tutorial serves as demonstration of how to set up an Interface.
Our observed spectra were created with the old C++ version of the
code. We have three spectra of a binary consisting of
primary: teff = 25000, g = 4.2, vrot = 150, lr = 0.7, z = 1.0
secondary: teff = 18000, g = 4.2, vrot = 50, lr = 0.3, z = 1.0
and various radial velocities. They look as if they were
observed spectra.
We will make advantage of the default behavior.
"""
import pyterpol
# 1) First we create a starlist
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=25000., logg=4.2, vrot=150., lr=0.7, z=1.0, rv=0.0)
sl.add_component(component='secondary', teff=18000., logg=4.2, vrot=50., lr=0.3, z=1.0, rv=0.0)
# 2) Now think of regions where we might want to do
# the comparison
rl = pyterpol.RegionList()
# the silicon lines
rl.add_region(wmin=6330, wmax=6375)
# Halpha
rl.add_region(wmin=6500, wmax=6600)
# 3) Now attach the data
ol = pyterpol.ObservedList()
obs = [
dict(filename='a'),
dict(filename='b'),
dict(filename='c'),
]
ol.add_observations(obs)
# 4) create the interface
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
itf.setup()
# review the class - this is a nice example of how the
# default groups are assigned. Both components have now
# six rvs and two lrs. The explanation is simple - we have
# three observed spectra and two regions. There is one
# radial velocity for each combination of spectrum and region,
# and one relative luminosity for each region.
print itf
"""
==============================================StarList==============================================
Component: primary
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 1 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 2 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 3 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 4 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 5 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 6 _typedef: <type 'float'>
name: teff value: 25000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: <type 'float'>
name: vrot value: 150.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: <type 'float'>
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: <type 'float'>
name: lr value: 0.7 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: <type 'float'>
name: lr value: 0.7 vmin: 0.0 vmax: 1.0 fitted: False group: 1 _typedef: <type 'float'>
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: <type 'float'>
Component: secondary
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 1 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 2 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 3 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 4 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 5 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 6 _typedef: <type 'float'>
name: teff value: 18000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: <type 'float'>
name: vrot value: 50.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: <type 'float'>
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: <type 'float'>
name: lr value: 0.3 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: <type 'float'>
name: lr value: 0.3 vmin: 0.0 vmax: 1.0 fitted: False group: 1 _typedef: <type 'float'>
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: <type 'float'>
=============================================RegionList=============================================
Region name: region00: (wmin, wmax) = (6330, 6375):
component: all groups: {'lr': 0}
Region name: region01: (wmin, wmax) = (6500, 6600):
component: all groups: {'lr': 1}
============================================ObservedList============================================
List of all attached spectra:
filename: a component: all korel: False loaded: True hasErrors: False global_error: None group: {'rv': [1, 4]} (min, max): (6250.0, 6799.9499999999998)
filename: b component: all korel: False loaded: True hasErrors: False global_error: None group: {'rv': [2, 5]} (min, max): (6250.0, 6799.9499999999998)
filename: c component: all korel: False loaded: True hasErrors: False global_error: None group: {'rv': [3, 6]} (min, max): (6250.0, 6799.9499999999998)
===============================================Fitter===============================================
Fitter: None optional_arguments: {}
Initial parameters:
====================================================================================================
"""
# since our 'observed spectra' are just model spectra,
# the radial velocity and relative luminosity are the
# same for each spectrum, so we may set the
# relative luminosity to be the same for each region
# and the radial velocity to be the same for each spectrum.
# we have groups for the task - clear the ObservedList
# and RegionList
rl.clear_all()
ol.clear_all()
# add the regions again and set a group in relative luminosity
# for both
rl.add_region(wmin=6330, wmax=6375, groups=dict(lr=0))
rl.add_region(wmin=6500, wmax=6600, groups=dict(lr=0))
# set a radial velocity group for each spectrum
# and add some errors, so we do not have to listen to
# the errors all the time
obs = [
dict(filename='a', group=dict(rv=1), error=0.001),
dict(filename='b', group=dict(rv=2), error=0.001),
dict(filename='c', group=dict(rv=3), error=0.001),
]
ol.add_observations(obs)
# create the Interface again
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
itf.setup()
# review it - we can now see that there is only
# one relative luminosity and three radial velocities
# for each component.
print itf
"""
==============================================StarList==============================================
Component: primary
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 1 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 2 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 3 _typedef: <type 'float'>
name: teff value: 25000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: <type 'float'>
name: vrot value: 150.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: <type 'float'>
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: <type 'float'>
name: lr value: 0.7 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: <type 'float'>
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: <type 'float'>
Component: secondary
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 1 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 2 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 3 _typedef: <type 'float'>
name: teff value: 18000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: <type 'float'>
name: vrot value: 50.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: <type 'float'>
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: <type 'float'>
name: lr value: 0.3 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: <type 'float'>
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: <type 'float'>
=============================================RegionList=============================================
Region name: region00: (wmin, wmax) = (6330, 6375):
component: all groups: {'lr': 0}
Region name: region01: (wmin, wmax) = (6500, 6600):
component: all groups: {'lr': 0}
============================================ObservedList============================================
List of all attached spectra:
filename: a component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 1} (min, max): (6250.0, 6799.9499999999998)
filename: b component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 2} (min, max): (6250.0, 6799.9499999999998)
filename: c component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 3} (min, max): (6250.0, 6799.9499999999998)
===============================================Fitter===============================================
Fitter: None optional_arguments: {}
Initial parameters:
====================================================================================================
"""
# lets save the class - it will create a text file, from
# which the interface can be easily loaded
itf.save('setup.itf')
# and have a look at what the comparisons look like -
# figname serves only as a prefix in this case.
itf.plot_all_comparisons(figname='initial')
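# the saved session can later be restored in a new script simply with
# itf = pyterpol.Interface.load('setup.itf')
# which is exactly what the follow-up fitting example does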
| 9,099 | 51.601156 | 151 | py |
pyterpol | pyterpol-master/pyterpol_examples/Interface/fit/example.py | """
This tutorial serves as demonstration of how to fit observed
spectra with Pyterpol.
Our observed spectra were created with the old C++ version of the
code. We have three spectra of a binary consisting of
primary: teff = 25000, g = 4.2, vrot = 150, lr = 0.7, z = 1.0
secondary: teff = 18000, g = 4.2, vrot = 50, lr = 0.3, z = 1.0
and various radial velocities. They look as if they were
observed spectra.
Now we will pick up the interface where we left off and
fit the data.
"""
import pyterpol
import numpy as np
# Create the fitting environment and load the last
# session.
itf = pyterpol.Interface.load('setup.itf')
# review the loaded interface - if we compare it with the
# previous example, we see that everything loaded as it
# should
print itf
"""
==============================================StarList==============================================
Component: primary
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 1 _typedef: None
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 2 _typedef: None
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 3 _typedef: None
name: teff value: 25000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: None
name: vrot value: 150.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: None
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: None
name: lr value: 0.7 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: None
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: None
Component: secondary
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 1 _typedef: None
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 2 _typedef: None
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 3 _typedef: None
name: teff value: 18000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: None
name: vrot value: 50.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: None
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: None
name: lr value: 0.3 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: None
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: None
=============================================RegionList=============================================
Region name: region00: (wmin, wmax) = (6330.0, 6375.0):
component: all groups: {'lr': 0}
Region name: region01: (wmin, wmax) = (6500.0, 6600.0):
component: all groups: {'lr': 0}
============================================ObservedList============================================
List of all attached spectra:
filename: a component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 1} (min, max): (6250.0, 6799.9499999999998)
filename: b component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 2} (min, max): (6250.0, 6799.9499999999998)
filename: c component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 3} (min, max): (6250.0, 6799.9499999999998)
===============================================Fitter===============================================
Fitter: None optional_arguments: {}
Initial parameters:
====================================================================================================
"""
# the second step is to set what will be fitted. Do not forget to
# set the boundaries vmin, vmax too.
# we can do it parameter by parameter
itf.set_parameter(group=1, component='primary', parname='rv', fitted=True)
# or we can set the boundaries and the fitted flag all at once
itf.set_parameter(group=1, component='primary', parname='rv', fitted=True, vmin=-120., vmax=120.)
# or set everything for primary
itf.set_parameter(component='primary', parname='rv', fitted=True, vmin=-120., vmax=120.)
# or set everything for every rv in the StarList
itf.set_parameter(parname='rv', fitted=True, vmin=-120., vmax=120.)
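# note that the calls above are progressively less specific - the last one reaches
# every rv parameter of every component and group, so it covers the previous ones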
# now lets set the fitter - one even does not have to construct the class
# it is sufficient to choose the fitter - lets take nelder and mead
# preferably nelder-mead, because it makes use of the boundaries
# for simplex it is good to set the initial uncertainty
init_step = 50*np.ones(6)
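# (one entry per fitted parameter - here the six radial velocities,
#  i.e. three rv groups for each of the two components)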
itf.choose_fitter('nlopt_nelder_mead', init_step=init_step, ftol=1e-6)
# lets review the whole session
print itf
"""
==============================================StarList==============================================
Component: primary
name: rv value: 0.0 vmin: -120.0 vmax: 120.0 fitted: True group: 1 _typedef: None
name: rv value: 0.0 vmin: -120.0 vmax: 120.0 fitted: True group: 2 _typedef: None
name: rv value: 0.0 vmin: -120.0 vmax: 120.0 fitted: True group: 3 _typedef: None
name: teff value: 25000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: None
name: vrot value: 150.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: None
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: None
name: lr value: 0.7 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: None
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: None
Component: secondary
name: rv value: 0.0 vmin: -120.0 vmax: 120.0 fitted: True group: 1 _typedef: None
name: rv value: 0.0 vmin: -120.0 vmax: 120.0 fitted: True group: 2 _typedef: None
name: rv value: 0.0 vmin: -120.0 vmax: 120.0 fitted: True group: 3 _typedef: None
name: teff value: 18000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: None
name: vrot value: 50.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: None
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: None
name: lr value: 0.3 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: None
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: None
=============================================RegionList=============================================
Region name: region00: (wmin, wmax) = (6330.0, 6375.0):
component: all groups: {'lr': 0}
Region name: region01: (wmin, wmax) = (6500.0, 6600.0):
component: all groups: {'lr': 0}
============================================ObservedList============================================
List of all attached spectra:
filename: a component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 1} (min, max): (6250.0, 6799.9499999999998)
filename: b component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 2} (min, max): (6250.0, 6799.9499999999998)
filename: c component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 3} (min, max): (6250.0, 6799.9499999999998)
===============================================Fitter===============================================
Fitter: nlopt_nelder_mead optional_arguments: {}
Initial parameters:(rv, g.): (0.0, 1); (rv, g.): (0.0, 2); (rv, g.): (0.0, 3); (rv, g.): (0.0, 1); (rv, g.): (0.0, 2);
(rv, g.): (0.0, 3);
====================================================================================================
"""
# check the initial chi-square - first we have to get the fitted parameters
# and convert list of Parameters -> list of floats
init_pars = [par['value'] for par in itf.get_fitted_parameters()]
# or we can let the function do it for us
init_pars = itf.get_fitted_parameters(attribute='value')
# now evaluate the chi-square for these initial values
init_chi2 = itf.compute_chi2(init_pars)
print "Initial chi-square: %f" % init_chi2
"""
Initial chi-square: 950375.454308
"""
# finally run the fitting
itf.run_fit()
# check the final chi-square
final_pars = itf.get_fitted_parameters(attribute='value')
final_chi2 = itf.compute_chi2(final_pars)
print "Final chi-square (nlopt_nelder_mead): %f" % final_chi2
"""
Final chi-square (nlopt_nelder_mead): 144433.598816
"""
# and plot everything
itf.plot_all_comparisons(figname='final_nm')
# It is not surprising that the fitting failed - Why?!
# for radial velocities one is in general far from the
# global minimum - the variation can be high, so
# it is better to get the first estimate with a global method
# like differential evolution
itf.choose_fitter('sp_diff_evol')
itf.run_fit()
# check the final chi-square
final_pars = itf.get_fitted_parameters(attribute='value')
final_chi2 = itf.compute_chi2(final_pars)
print "Final chi-square: %f (sp_diff_evol)" % final_chi2
"""
Final chi-square: 73.231889 (sp_diff_evol)
"""
# lets see the difference
itf.plot_all_comparisons(figname='final_de')
# The message here is that before one really tries
# to fit radiative properties, it is better to do the
# fitting of RVs first. Since we are not using
# any previous information on the RVs (the orbital
# solution is not attached) it is better to
# use a global method - especially for a large parameter space
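# a minimal sketch of that two-stage approach, reusing only calls shown above:
# itf.choose_fitter('sp_diff_evol')                    # global search for the RVs
# itf.run_fit()
# itf.choose_fitter('nlopt_nelder_mead', ftol=1e-6)    # local polish of the result
# itf.run_fit()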
itf.save('fitted.itf')
| 8,806 | 45.845745 | 146 | py |
pyterpol | pyterpol-master/pyterpol_examples/SyntheticSpectrum/example.py | """
This is a tutorial script on how to handle the class SyntheticSpectrum.
"""
import pyterpol
import numpy as np
import matplotlib.pyplot as plt
# Load the spectrum using the library numpy
wave, intens = np.loadtxt('grid.dat', unpack=True, usecols=[0,1])
# The synthetic spectrum can be created either from arrays
ss = pyterpol.SyntheticSpectrum(wave=wave, intens=intens)
# or just loaded from a file
ss = pyterpol.SyntheticSpectrum(f='grid.dat')
# usually your spectra have additional properties.
# Note that parameters passed during the construction
# of the class have no impact on the underlying spectrum
ss = pyterpol.SyntheticSpectrum(f='grid.dat', teff=18000, logg=4.5, idiotic_parameter='pes')
# which you can then easily review
print ss
# or change
ss['teff'] = 12000
# and review the changes
print ss
# Or we can directly view the spectrum
ss.plot(savefig=True, figname='original.png')
# the spectrum can be rotated
newwave, newintens = ss.get_spectrum(vrot=50)
# shifted in rv
newwave, newintens = ss.get_spectrum(rv=50)
# shrunk (by the relative luminosity lr)
newwave, newintens = ss.get_spectrum(lr=0.7)
# or transformed to KOREL
newwave, newintens = ss.get_spectrum(korel=True)
# or all together
newwave, newintens = ss.get_spectrum(vrot=50, lr=0.7, rv=50, korel=True)
# lets wrap this into SyntheticSpectrum and plot it
nss = pyterpol.SyntheticSpectrum(wave=newwave, intens=newintens)
nss.plot(savefig=True, figname='adjusted.png')
plt.show()
| 1,449 | 25.851852 | 92 | py |
pyterpol | pyterpol-master/pyterpol_examples/SyntheticGrid/example.py | """
This script serves as a demonstration of the class SyntheticGrid.
"""
# import the library
import pyterpol
import matplotlib.pyplot as plt
# The handling of the synthetic grid is hidden from the user,
# therefore the user's interaction with the grid should
# be restricted to only a few methods.
# How to create a grid? Oops - we have forgotten which modes are available.
# So we create a default grid and have a look at the grids that are available
# In general the user should use the default grid, because it spans all
# implemented grids
sg = pyterpol.SyntheticGrid()
# The method list_modes returns string, so it has to be printed
print sg.list_modes()
# Now we know the modes, so we can either create the grid again
sg = pyterpol.SyntheticGrid(mode='bstar')
# or just set mode for the existing one - BSTAR will be our
# exemplary grid.
sg.set_mode(mode='bstar')
# we set up a grid, so we can interpolate.
# synthetic spectra should be queried with
# the method get_synthetic_spectrum - lets do some calls
# Grid parameters have to be wrapped using
# following dictionary
pars = dict(teff=18200, logg=4.3, z=1.2)
# We should also pass some boundaries, unless we want
# to get the whole wavelength range of the grid
spectrum1 = sg.get_synthetic_spectrum(pars, [4300, 4400])
# we can view properties of the synthetic spectrum
print spectrum1
# we can of course plot_it
spectrum1.plot(savefig=True, figname='spectrum1.png')
# it is a SyntheticSpectrum instance, so it has all of its
# features; if we want it to adopt the newly transformed
# spectrum, we say so with the keyword 'keep'
spectrum1.get_spectrum(vrot=30., rv=-200., lr=0.3, wmin=4300, wmax=4400, keep=True)
spectrum1.plot(savefig=True, figname='spectrum1_adjusted.png')
# A great feature of the class is that it remembers all
# loaded spectra until the program ends. This means that
# if your next interpolation requires similar spectra
# from the grid, everything will be much faster
pars = dict(teff=18300, logg=4.2, z=1.1)
spectrum1= sg.get_synthetic_spectrum(pars, [4300, 4400])
# User can also change the resolution of the grid
# by setting keyword step and the number of the
# spectra that are used for interpolation by setting
# keyword order
# step = wavelength step
# order = maximal number of spectra, that should be used for
# interpolation
pars = dict(teff=29300, logg=3.1, z=0.74)
spectrum2 = sg.get_synthetic_spectrum(pars, [4300, 4400], order=4, step=0.05)
# plot comparison of the two spectra
fig = plt.figure()
ax = fig.add_subplot(111)
spectrum1.plot(ax=ax)
spectrum2.plot(ax=ax)
plt.savefig('comparison.png')
| 2,610 | 31.6375 | 83 | py |
pyterpol | pyterpol-master/pyterpol_examples/observed_spectra_fitting/v746cas/v746cas_2.py | """
V746Cas - fitting of observed spectra.
This example also shows how we can proceed if
we want to fit parameters step by step.
"""
import pyterpol
import numpy as np
import matplotlib.pyplot as plt
def inspect_spectra(f):
ifile = open(f, 'r')
slist = ifile.readlines()
ifile.close()
for rec in slist:
ifile = open(rec.rstrip('\n'), 'r')
ifile.readline()
x, y = np.loadtxt(ifile, unpack=True, usecols=[0,1])
plt.plot(x, y, '-')
plt.show()
# Setting up the interface is something that should be kept
# separate from the fitting, because consecutive fits
# change the initial settings.
def setup_interface_single_obs():
# have a look at one observation
ol = pyterpol.ObservedList()
obs = pyterpol.ObservedSpectrum(filename='v7c00001.asc', group=dict(rv=0), error=0.01)
# two methods how to estimate the error
print obs.get_sigma_from_continuum(cmin=6665, cmax=6670)
print obs.get_sigma_from_fft()
ol.add_observations([obs])
# define a starlist
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=17000., logg=4.0, vrot=180., z=1.0, lr=1.0)
# define regions
rl = pyterpol.RegionList()
rl.add_region(wmin=6340, wmax=6410, groups=dict(lr=0))
rl.add_region(wmin=6520, wmax=6610, groups=dict(lr=0))
rl.add_region(wmin=6665, wmax=6690, groups=dict(lr=0))
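    # all three regions share the relative-luminosity group lr=0,
    # so a single lr value applies to them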
# create interfaces
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
# set fit order = 2 to do it fast
itf.set_grid_properties(order=2)
itf.setup()
# review the result - one rv group, one lr group
print itf
# plot comparison
itf.plot_all_comparisons(figname='teff17000')
    # try different temperatures - this way we can easily review
# several comparisons
itf.set_parameter(parname='teff', value=25000.)
itf.populate_comparisons()
itf.plot_all_comparisons(figname='teff25000')
itf.set_parameter(parname='teff', value=13000.)
itf.populate_comparisons()
itf.plot_all_comparisons(figname='teff13000')
itf.save('initial.itf')
# if we want to fit interactively, parameter by parameter
# it is easier to use the save/load mechanism
# itf = pyterpol.Interface.load('tefffit.itf')
# itf = pyterpol.Interface.load('vrotfit.itf')
itf = pyterpol.Interface.load('loggfit.itf')
# choose a fitter
itf.choose_fitter('nlopt_nelder_mead', ftol=1e-6)
# change another parameter
# itf.set_parameter(parname='vrot', vmin=120., vmax=200., fitted=True)
# itf.set_parameter(parname='logg', vmin=3.5, vmax=4.5, fitted=True)
itf.set_parameter(parname='z', vmin=0.5, vmax=2.0, fitted=True)
itf.set_parameter(parname='teff', vmin=15000., fitted=True)
itf.run_fit()
# get the result
# itf.plot_all_comparisons(figname='vrotfit')
itf.plot_all_comparisons(figname='zfit')
# itf.write_fitted_parameters(outputname='iter03.res')
itf.write_fitted_parameters(outputname='iter05.res')
# save a new Interface
# itf.save('vrotfit.itf')
itf.save('zfit.itf')
| 2,974 | 28.455446 | 90 | py |
pyterpol | pyterpol-master/pyterpol_examples/observed_spectra_fitting/v746cas/v746cas.py | """
V746Cas - fitting of observed spectra.
This example also shows how we can proceed if
we want to fit parameters step by step.
"""
import pyterpol
import numpy as np
import matplotlib.pyplot as plt
def inspect_spectra(f):
ifile = open(f, 'r')
slist = ifile.readlines()
ifile.close()
for rec in slist:
ifile = open(rec.rstrip('\n'), 'r')
ifile.readline()
x, y = np.loadtxt(ifile, unpack=True, usecols=[0,1])
plt.plot(x, y, '-')
plt.show()
# Setting up the interface is something that should be kept
# separate from the fitting, because consecutive fits
# change the initial settings.
def setup_interface_single_obs():
# have a look at one observation
ol = pyterpol.ObservedList()
obs = pyterpol.ObservedSpectrum(filename='v7c00001.asc', group=dict(rv=0), error=0.01)
# two methods how to estimate the error
print obs.get_sigma_from_continuum(cmin=6665, cmax=6670)
print obs.get_sigma_from_fft()
ol.add_observations([obs])
# define a starlist
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=17000., logg=4.0, vrot=180., z=1.0, lr=1.0)
# define regions
rl = pyterpol.RegionList()
rl.add_region(wmin=6340, wmax=6410, groups=dict(lr=0))
rl.add_region(wmin=6520, wmax=6610, groups=dict(lr=0))
rl.add_region(wmin=6665, wmax=6690, groups=dict(lr=0))
# create interfaces
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
# set fit order = 2 to do it fast
itf.set_grid_properties(order=2)
itf.setup()
# review the result - one rv group, one lr group
print itf
# plot comparison
itf.plot_all_comparisons(figname='teff17000')
    # try different temperatures - this way we can easily review
# several comparisons
itf.set_parameter(parname='teff', value=25000.)
itf.populate_comparisons()
itf.plot_all_comparisons(figname='teff25000')
itf.set_parameter(parname='teff', value=13000.)
itf.populate_comparisons()
itf.plot_all_comparisons(figname='teff13000')
itf.save('initial.itf')
# if we want to fit interactively, parameter by parameter
# it is easier to use the save/load mechanism
# itf = pyterpol.Interface.load('tefffit.itf')
# itf = pyterpol.Interface.load('vrotfit.itf')
itf = pyterpol.Interface.load('loggfit.itf')
# choose a fitter
itf.choose_fitter('nlopt_nelder_mead', ftol=1e-6)
# change another parameter
# itf.set_parameter(parname='vrot', vmin=120., vmax=200., fitted=True)
# itf.set_parameter(parname='logg', vmin=3.5, vmax=4.5, fitted=True)
itf.set_parameter(parname='z', vmin=0.5, vmax=2.0, fitted=True)
itf.set_parameter(parname='teff', vmin=15000., fitted=True)
itf.run_fit()
# get the result
# itf.plot_all_comparisons(figname='vrotfit')
itf.plot_all_comparisons(figname='zfit')
# itf.write_fitted_parameters(outputname='iter03.res')
itf.write_fitted_parameters(outputname='iter05.res')
# save a new Interface
# itf.save('vrotfit.itf')
itf.save('zfit.itf')
| 2,974 | 28.455446 | 90 | py |
pyterpol | pyterpol-master/pyterpol_examples/observed_spectra_fitting/v746cas_2/v746cas_2.py | """
V746Cas - fitting of observed spectra.
This example also shows how we can proceed if
we want to fit parameters step by step.
"""
import pyterpol
import numpy as np
import matplotlib.pyplot as plt
def inspect_spectra(f):
"""
Plots all spectra.
:param f:
:return:
"""
ifile = open(f, 'r')
slist = ifile.readlines()
ifile.close()
for rec in slist:
ifile = open(rec.rstrip('\n'), 'r')
ifile.readline()
x, y = np.loadtxt(ifile, unpack=True, usecols=[0,1])
plt.plot(x, y, '-')
plt.show()
def read_obs_from_list(f):
"""
Create a list of observations.
:param f:
:return:
"""
ifile = open(f, 'r')
slist = ifile.readlines()
ifile.close()
obs = []
for i,rec in enumerate(slist[:]):
o = pyterpol.ObservedSpectrum(filename=rec.rstrip('\n'), group=dict(rv=i))
o.get_sigma_from_continuum(cmin=6620., cmax=6640., store=True)
obs.append(o)
return obs
def setup_interface_more_obs():
    # attach all observations from the list
ol = pyterpol.ObservedList()
obs = read_obs_from_list('spec.lis')
ol.add_observations(obs)
# define a starlist
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=16000., logg=3.9, vrot=95., z=1.2, lr=1.0)
# define regions
rl = pyterpol.RegionList()
rl.add_region(wmin=6340, wmax=6410, groups=dict(lr=0))
rl.add_region(wmin=6540, wmax=6595, groups=dict(lr=0))
rl.add_region(wmin=6670, wmax=6685, groups=dict(lr=0))
# create interfaces
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol, debug=False)
# set fit order = 2 to do it fast
itf.set_grid_properties(order=2, step=0.05)
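    # order = maximal number of grid spectra used for the interpolation,
    # step = wavelength step of the interpolated spectra (see the SyntheticGrid example)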
itf.setup()
# save the session
itf.save('initial.itf')
def optimize_rv(session0, session1):
"""
Optimizes RV.
:return:
"""
# setup the spectra
itf = pyterpol.Interface.load(session0)
# set parameters
itf.set_parameter(parname='rv', fitted=True, vmin=-60., vmax=60.)
itf.choose_fitter('nlopt_nelder_mead', ftol=1e-6)
# run fit
itf.run_fit()
# plot every comparison
itf.plot_all_comparisons(figname='rvfit')
# save the fit
itf.save(session1)
def optimize_all(session0, session1):
"""
Optimizes all parameters
:return:
"""
# setup the spectra
itf = pyterpol.Interface.load(session0)
# itf.set_one_for_all(True)
itf.set_parameter(parname='rv', fitted=True, vmin=-60., vmax=60.)
itf.set_parameter(parname='teff', fitted=True, vmin=15000., vmax=17000.)
itf.set_parameter(parname='logg', fitted=True, vmin=3.7, vmax=4.2)
itf.set_parameter(parname='vrot', fitted=True, vmin=80., vmax=160.)
itf.set_parameter(parname='z', fitted=True, vmin=1.0, vmax=2.0)
itf.choose_fitter('nlopt_nelder_mead', ftol=1e-5)
# run fit
itf.run_fit()
# plot every comparison
itf.plot_all_comparisons(figname='nmallfit')
# save the fit
itf.save(session1)
return itf
# setup the interface
setup_interface_more_obs()
# run the optimization
itf = optimize_all('initial.itf', 'nmallfit_newinit.itf')
# plot the comparisons found with the minimizer
#itf.plot_all_comparisons()
# set errors for the MC estimation - the parameters should lie within the interval
# there is no point in fitting the z, since it is converging out of the
# grid.
#itf.set_error(parname='rv', error=10.)
#itf.set_one_for_all(True)
#itf.set_parameter(parname='teff', vmin=15000., vmax=16500.)
#itf.set_parameter(parname='logg', vmin=3.5, vmax=4.2)
#itf.set_parameter(parname='vrot', vmin=120., vmax=160.)
#itf.set_parameter(parname='z', value=2.0, fitted=False)
#itf.run_mcmc(chain_file='chain.dat', niter=200)
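# the chain.dat written by run_mcmc can afterwards be inspected with the
# plot_*_mcmc helpers and write_mc_result (see v746cas_eval_mcmc.py)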
| 3,716 | 24.993007 | 89 | py |
pyterpol | pyterpol-master/pyterpol_examples/observed_spectra_fitting/v746cas_2/v746cas_eval_mcmc.py | import pyterpol
# check convergence of individual parameters
pyterpol.Interface.plot_convergence_mcmc('chain.dat', figname='mcmc_convergence.png')
# plot covariance of radiative parameters
pyterpol.Interface.plot_covariances_mcmc('chain.dat', parameters=['vrot', 'teff', 'logg'], figname='mcmc_correlations.png')
# plot variance of rvs
pyterpol.Interface.plot_variances_mcmc('chain.dat', parameters=['rv'], figname='rv_var')
# write result
pyterpol.Interface.write_mc_result('chain.dat', outputname='mcmc.res') | 514 | 38.615385 | 123 | py |
pyterpol | pyterpol-master/pyterpol_examples/observed_spectra_fitting/v746cas_2/ondrejov/mask_tell.py | import sys
import numpy as np
tellbase = [
[6522., 6525.5],
[6530., 6538.5],
[6541.8, 6550.37],
[6551.75, 6554.9],
[6557., 6560],
[6563.6, 6564.8],
[6568.38, 6576.3],
[6580.2, 6588.2],
[6594.2, 6596.],
[6598.8, 6603.4]
]
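# (the intervals above are wavelength ranges in Angstrom around H alpha that are
#  contaminated by telluric lines; remove_telluric() below cuts them out)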
def remove_telluric(f):
"""
Removes intervals defined in tellbase
:param f:
:return:
"""
# load the data
w,i = np.loadtxt(f, unpack=True, usecols=[0,1])
#remove wavelength intervals line by line
for lim in tellbase:
ind = np.where((w <= lim[0]) | (w >= lim[1]))[0]
w = w[ind]
i = i[ind]
np.savetxt(f, np.column_stack([w,i]), fmt='%12.6f')
def main():
f = sys.argv[1]
remove_telluric(f)
if __name__ == '__main__':
main()
| 770 | 15.404255 | 56 | py |
pyterpol | pyterpol-master/pyterpol_examples/ObservedList/example.py | """
This example demonstrates how to prepare observations.
"""
import pyterpol
# create a blank list
ol = pyterpol.ObservedList()
# now we are ready to attach some data - lets have a look at the data first
# the spectrum is not a KOREL spectrum, so we do not have to pass additional
# information
obs1 = pyterpol.ObservedSpectrum(filename='o.asc')
# lets plot the spectrum
obs1.plot(figname='observed1.png', savefig=True)
# Lets pretend that the second spectrum is a KOREL spectrum,
# because it is not really important, what is what now.
obs2 = pyterpol.ObservedSpectrum(filename='o2.asc', component='primary', korel=True)
obs2.plot(figname='observed2.png', savefig=True)
# Now attach the spectra to the ObservedList one by one
ol.add_one_observation(obs=obs1)
ol.add_one_observation(obs=obs2)
# review the class
print ol
# the name suggests that all spectra can be attached at once
# lets clear the list first
ol.clear_all()
# add them all at once
ol.add_observations([obs1, obs2])
# review
print ol
# we saw that pyterpol complains a lot about not having the errors
# of the spectra. We also saw that no groups were assigned. That is
# because the default groups are set only by the Interface class.
# lets clear the list again
ol.clear_all()
# It is not necessary to wrap the observations into
# the class ObservedSpectrum. ObservedList does that
# for us. We only have to pass the parameters. Also lets
# pass some errors, and some groups
ol.add_one_observation(filename='o.asc', error=0.01, group=dict(rv=1))
ol.add_one_observation(filename='o2.asc', error=0.01, group=dict(rv=2), component='primary', korel=True)
# We can see that groups were set. In this configuration a separate set of radial
# velocities would be fitted for each spectrum. Such configuration is desirable
# if we work with different observed spectra.
print ol
# lets clear the class for the last time
ol.clear_all()
# If our spectra were different regions from one long spectrum,
# we may want to have the same velocity for each spectrum. Lets
# add the observations as a list of dictionaries
obs = [
dict(filename='o.asc', error=0.01, group=dict(rv=1)),
dict(filename='o2.asc', error=0.01, group=dict(rv=1), component='primary', korel=True)
]
ol.add_observations(obs)
# in this configuration there will be only one velocity
# for the two spectra. It has to be stressed that although
# two components have the same group for a parameter,
# THE SAME PARAMETER WILL NEVER BE FITTED. EACH COMPONENT
# GETS ALWAYS ITS OWN PARAMETER FOR EACH GROUP.
print ol
# Here will be a demonstration of additional methods, which are not needed for
# usage of the class. It may potentially be dangerous to use them if
# your ObservedList has been added to an interface. | 2,934 | 34.361446 | 104 | py |
pyterpol | pyterpol-master/pyterpol_examples/StarList/example.py | """
This script demonstrates capabilities of the StarList class.
"""
import pyterpol
# create an empty class
sl = pyterpol.StarList()
# pyterpol knows a set of parameters, which are given to a component
# these parameters are teff, logg, z, lr, vrot and rv. Therefore
# adding a component is possible by just calling:
sl.add_component()
# review
print sl
# in general it is better to name the component, because
# then it is easier to identify what belongs to what:
sl.add_component('secondary')
# review
print sl
# and the best is to pass values of all parameters
sl.add_component('tertiary', teff=15000., logg=4.0, vrot=10., rv=-20., lr=0.1, z=1.6)
# review
print sl
# what if we do not want the component to have some parameters?
# just pass None for the parameter and forbid pyterpol
# from using defaults
sl.add_component('quaternary', teff=None, logg=None, vrot=10., rv=-20., lr=0.1, z=None, use_defaults=False)
# In a rare case, when we want to define a parameter,
# which is not listed among the default ones, we can
# add a parameter using method add_parameter_to_component.
# First one must pass the name of the component to which
# we add the data (See why it is better to set your
# component names:-) and after that just pass attributes
# of a parameter.
sl.add_parameter_to_component(component='secondary', name='Stupid_parameter',
value=6, unit='half_a_dodo', fitted=False,)
# Nevertheless if your grid is given by parameters not
# listed among the default ones, we encourage you
# to add the parameter to default ones.
#review
print sl
# What if we want to see a list of all defined physical parameters
print sl.get_physical_parameters()
# Here will be a demonstration of additional methods, which are not needed for
# usage of the class. It may potentially be dangerous to use them if
# your StarList has been added to an interface.
# TODO
| 2,079 | 31 | 107 | py |
pyterpol | pyterpol-master/pyterpol_examples/disentangled_spectra_fitting/hd81357/hd81357_plot.py | """
This shows how to evaluate the outcome of the fitting.
We were fitting the disentangled spectra of the secondary.
"""
import pyterpol
# 1) Load the last session - create an empty Interface
itf = pyterpol.Interface()
# fill it with the last session
itf.load('hd81357.sav')
# 2) Have a look at the comparisons
itf.plot_all_comparisons(figname='finalfit')
# 3) Export the disentangled spectra
itf.write_synthetic_spectra(outputname='final_spectra')
# 4) Have a look how everything converged
itf.plot_convergence(figname='covergence_hd81357.png')
# 5) Have a look at the uncertainty of the fit
itf.plot_covariances(nbin=20)
| 627 | 20.655172 | 58 | py |
pyterpol | pyterpol-master/pyterpol_examples/disentangled_spectra_fitting/hd81357/hd81357.py | """
Real life demonstration. HD81357 is an interacting binary.
Its secondary is a Roche-lobe filling star, which is probably
losing its mass. We obtained disentangled spectra of the secondary
in two spectral regions. Here is an estimate of its radiative properties.
"""
# import numpy as np
import pyterpol
## 1) Create RegionList. This step is mandatory.
rl = pyterpol.RegionList()
# Add some regions - two in our case
rl.add_region(wmin=6324, wmax=6424, groups=dict(lr=0))
rl.add_region(wmin=4380, wmax=4497, groups=dict(lr=1))
# 2) Create ObservationList
ol = pyterpol.ObservedList()
# attach the disentangled spectra - in case of KOREL data it is mandatory to specify
# to which component the spectrum belongs and flag it as a KOREL spectrum
obs = [
dict(filename='DE_blue02_n.dat', component='secondary', korel=True, error=0.01),
dict(filename='DE_red02_n.dat', component='secondary', korel=True, error=0.01)
]
ol.add_observations(obs)
# 3) Create StarList
sl = pyterpol.StarList()
# add components
# values for each component are passed at the addition
sl.add_component(component='secondary', teff=4200., logg=1.70, vrot=20., lr=0.2)
# 4) Create the Interface
# Interface serves as a wrapper to the three different Lists, the fitting environment
# and the synthetic grids. Fitting environment can be added later
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
# define grid properties - we want the program to use the
# cubic spline.
itf.set_grid_properties(order=4)
# Once you setup the Interface, you should not change any List
# using anything other than methods defined in the Interface.
# Doing so, may lead to unpredictable consequences.
itf.setup()
# 5) set the parameters - setting up boundaries is very important, it not only
# speeds up the computation, but also prevents the code from running out of
# the grid
itf.set_parameter(component='secondary', parname='teff', fitted=True, vmin=4005., vmax=6000.)
itf.set_parameter(component='secondary', parname='vrot', fitted=True, vmin=10., vmax=30.)
itf.set_parameter(component='secondary', parname='lr', fitted=True, vmin=0.02, vmax=0.4)
itf.set_parameter(component='secondary', parname='lr', group=1, fitted=True, value=0.10, vmin=0.05, vmax=0.4)
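# lr with group=1 is the relative luminosity of the blue region (4380-4497 A)
# defined above, so each region gets its own lr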
itf.set_parameter(component='secondary', parname='rv', fitted=True, vmin=-20.0, vmax=20.0)
# 6) choose a fitting environment - in this case it is nelder mead and
# the tolerated relative change of chi^2
itf.choose_fitter('nlopt_nelder_mead', ftol=1e-6)
# check that everything is set as intended - we have
# two relative luminosities and two radial velocities
# and that's what we wanted
print itf
"""
==============================================StarList==============================================
Component: secondary
name: rv value: 0.0 vmin: -20.0 vmax: 20.0 fitted: True group: 1 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -20.0 vmax: 20.0 fitted: True group: 2 _typedef: <type 'float'>
name: teff value: 4200.0 vmin: 4005.0 vmax: 6000.0 fitted: True group: 0 _typedef: <type 'float'>
name: vrot value: 20.0 vmin: 10.0 vmax: 30.0 fitted: True group: 0 _typedef: <type 'float'>
name: logg value: 1.7 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: <type 'float'>
name: lr value: 0.2 vmin: 0.02 vmax: 0.4 fitted: True group: 0 _typedef: <type 'float'>
name: lr value: 0.1 vmin: 0.05 vmax: 0.4 fitted: True group: 1 _typedef: <type 'float'>
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: <type 'float'>
=============================================RegionList=============================================
Region name: region00: (wmin, wmax) = (6324, 6424):
component: all groups: {'lr': 0}
Region name: region01: (wmin, wmax) = (4380, 4497):
component: all groups: {'lr': 1}
============================================ObservedList============================================
List of all attached spectra:
filename: DE_blue02_n.dat component: secondary korel: True loaded: True hasErrors: True global_error: 0.01 group: {'rv': [2]} (min, max): (4377.0, 4500.0)
filename: DE_red02_n.dat component: secondary korel: True loaded: True hasErrors: True global_error: 0.01 group: {'rv': [1]} (min, max): (6321.0, 6426.96)
===============================================Fitter===============================================
Fitter: nlopt_nelder_mead optional_arguments: {'ftol': 1e-06}
Initial parameters:(lr, g.): (0.2, 0); (lr, g.): (0.1, 1); (rv, g.): (0.0, 1); (rv, g.): (0.0, 2); (teff, g.): (4200.0, 0);
(vrot, g.): (20.0, 0);
====================================================================================================
"""
# get the initial chi-square
init_pars = itf.get_fitted_parameters(attribute='value')
init_chi2 = itf.compute_chi2(init_pars)
print "The initial chi-square: %f" % (init_chi2)
"""
The initial chi-square: 20739.073943
"""
# 7) run fitting
itf.run_fit()
# get the final chi-square
final_pars = itf.get_fitted_parameters(attribute='value')
final_chi2 = itf.compute_chi2(final_pars)
print "The final chi-square: %f" % (final_chi2)
"""
The final chi-square: 5110.224473
"""
# write the fit result
itf.write_fitted_parameters(outputname='result.dat')
# 8) when the fit is done, save the file
itf.save('hd81357.sav')
# 9) Have a look at the comparisons
itf.plot_all_comparisons(figname='finalfit')
# 10) Export the disentangled spectra
itf.write_synthetic_spectra(outputname='final_spectra')
# 11) Have a look how everything converged
itf.plot_convergence(figname='covergence_hd81357.png')
# 12) Have a look at the uncertainty of the fit
itf.plot_covariances(nbin=20, parameters=['lr', 'teff', 'vrot', 'logg'])
| 5,610 | 39.366906 | 154 | py |
pyterpol | pyterpol-master/pyterpol_examples/Fitter/example.py | """
This script demonstrates usage of the class Fitter.
"""
import pyterpol
import numpy as np
import matplotlib.pyplot as plt
import time
# define a function that will be minimized
def func(x):
"""
Polynomial of order 4
    :param x: list with a single free parameter
    :return:
"""
x = x[0]
return 0.5*x**4 - 2*x**3 - 5*x**2 + 12*x - 2
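# (this quartic has a shallow local minimum near x ~ -1.8 and the global minimum
# near x ~ 3.9, which is what makes it a handy test of local vs. global optimizers)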
# create an empty fitter
fitter = pyterpol.Fitter()
# fitter is designed to work with sets of Parameter types.
# so we create one.
par = pyterpol.Parameter(name='x', value=-5., vmin=-100., vmax=100., fitted=True)
# What kind of fitter will we choose... lets have a look at
# the available ones. Note that optional arguments that
# control each fit are also listed. For details have
# a look at the homepage of each environment
print fitter.list_fitters()
# nlopt_nelder_mead is always a good choice - note that
# this function expects a list of parameters not a single
# parameter, so we have to put it into brackets. ftol
# means that the fitting will end once the relative change
# of the cost function is less than 1e-6.
fitter.choose_fitter('nlopt_nelder_mead', fitparams=[par], ftol=1e-6)
# check the fitter
print fitter
# we can run the fitting by calling the fitter
t0 = time.time()
fitter(func)
dt1 = time.time() - t0
# have a look at the minimum and the value at minimum
print "func(%s) = %s" % (fitter.result, func(fitter.result))
# lets plot the function to be sure that we are in the global minimum
x = np.linspace(-10, 10, 200)
plt.plot(x, [func([v]) for v in x], 'k-', label='func(x)')
plt.plot(fitter.result, func(fitter.result), 'ro')
plt.ylim(-100, 100)
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.savefig('result_nm.png')
plt.close()
# We see that the minimizer failed to find the global minimum -
# that is not very unusual if we have similar minima.
# lets choose a more capable fitting environment
fitter.choose_fitter('sp_diff_evol', fitparams=[par])
# run the fitting
t0 = time.time()
fitter(func)
dt2 = time.time() - t0
# have a look at the minimum and the value at minimum
print "func(%s) = %s" % (fitter.result, func(fitter.result))
# lets plot the function to be sure that we are in the global minimum
x = np.linspace(-10, 10, 200)
plt.plot(x, [func([v]) for v in x], 'k-', label='func(x)')
plt.plot(fitter.result, func(fitter.result), 'ro')
plt.ylim(-100, 100)
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.savefig('result_de.png')
# The simplex was faster, but converged only
# locally, whereas the differential evolution
# converged correctly at the cost of ten times
# longer computation time
print "T(simplex) = %s" % str(dt1)
print "T(differential_evolution) = %s" % str(dt2)
| 2,764 | 28.105263 | 89 | py |
pyterpol | pyterpol-master/pyterpol_examples/RegionList/example.py | """
This script demonstrates capabilities of the RegionList class.
"""
import pyterpol
# create an empty class
rl = pyterpol.RegionList()
# add a region - the simplest way
rl.add_region(wmin=4300, wmax=4500)
# add a region, define name
rl.add_region(wmin=6200, wmax=6600, identification='red')
# for some reason we may want to fit a region only for
# one component. Then we have to specify it. Of course
# the component has to be among those defined in StarList
# to work well later.
rl.add_region(wmin=7600, wmax=7800, identification='nir', component='primary')
# We can now check that every region was assigned a different relative
# luminosity group lr
print rl
# What if we want to fit the same relative luminosity for two regions?
# When setting groups manually you have to be careful not to assign
# a group that is the same as one of those created automatically.
# Automatically they are created as 1, 2, 3 - unless one has been defined by the user.
rl.add_region(wmin=6340, wmax=6350, identification='SiA', groups=dict(lr=100))
rl.add_region(wmin=6365, wmax=6375, identification='SiB', groups=dict(lr=100))
# Now there will be only one lr group for silicon lines
print rl
# if the user wants to get a list of defined regions
print rl.get_registered_regions()
# or a list of wavelength limits
print rl.get_wavelengths()
# Here will be a demonstration of additional methods, which are not needed for
# usage of the class. It may potentially be dangerous to use them if
# your RegionList has been added to an interface.
# We may just want to create a comparison of observed and synthetic
# spectra and we may be very lazy. Then it is possible to read the
# regions from synthetic data.
# TODO | 1,879 | 35.153846 | 89 | py |
pyterpol | pyterpol-master/grids_ABS/ready_phoenix.py |
"""
ready_phoenix.py
Convert Phoenix synthetic spectra from FITS to DAT.
"""
__author__ = "Miroslav Broz (miroslav.broz@email.cz)"
__version__ = "Jun 23rd 2016"
import sys
import numpy as np
from scipy.interpolate import splrep, splev
from astropy.io import fits
from pyterpol.synthetic.auxiliary import instrumental_broadening
def read_wave(filename):
"""Read wavelength data"""
hdu = fits.open(filename)
wave = hdu[0].data
hdu.info()
print("")
print(repr(hdu[0].header))
print("")
print(wave)
print("")
hdu.close()
return wave
def fits2dat(filename, wave):
"""Convert Phoenix synthetic spectrum from FITS to DAT."""
hdu = fits.open(filename)
intens = hdu[0].data
hdu.info()
print("")
print(repr(hdu[0].header))
print("")
print(intens)
print("")
hdu.close()
# np.savetxt(filename[:-4]+'dat.0', np.column_stack([wave, intens]), fmt="%.6e %.10e") # dbg
# convolution (to 1 Angstrom)
step = 1.0
intens = instrumental_broadening(wave, intens, width=step)
s = np.array(zip(wave, intens))
print(intens)
print("")
print(s)
print("")
# spline interpolation
wmin = s[0,0]
wmax = s[-1,0]
wnew = np.arange(wmin, wmax, step)
tck = splrep(s[:,0], s[:,1])
s_new = splev(wnew, tck)
intens = s_new
    # eliminate negatives!
for i in xrange(0,len(intens)):
if intens[i] < 0.0:
intens[i] = 0.0
intens[i] = intens[i]*1.e-8 # erg s^-1 cm^-2 cm^-1 -> erg s^-1 cm^-2 A^-1 (as in POLLUX)
# save spectra
out = filename[:-4]+'vis.dat'
np.savetxt(out, np.column_stack([wnew, intens]), fmt="%.6e %.10e")
sys.exit(1) # dbg
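    # NOTE: this debugging exit stops the script after the first converted spectrum;
    # remove it to process the whole inlist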
def main():
"""Convert all files"""
if len(sys.argv) > 1:
inlist = sys.argv[1:]
else:
inlist = np.loadtxt("inlist", dtype=str)
wave = read_wave("WAVE_PHOENIX-ACES-AGSS-COND-2011.fits")
for filename in inlist:
fits2dat(filename, wave)
if __name__ == "__main__":
main()
| 2,065 | 20.747368 | 97 | py |
pyterpol | pyterpol-master/observed/observations.py | import warnings
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import splrep
from scipy.interpolate import splev
# repeat userwarnings
warnings.simplefilter('always', UserWarning)
class ObservedSpectrum:
"""
A wrapper class for the observed spectra.
"""
def __init__(self, wave=None, intens=None, error=None, filename=None,
component='all', korel=False, group=None, debug=False,
instrumental_width=0.0, **kwargs):
"""
Setups the class.
:param wave: wavelength vector (typically in angstrom)
:param intens: intensity vector (typically relative)
:param error: either error vector, or one value that will apply for whole spectrum
:param filename: ascii (2 or 3 columns - wave, intens error) with the data
:param component: components in the spectrum -- by default set to 'all'
:param korel: flag defining that spectrum was obtained with KOREL - by default false
:param group: different spectra can be grouped under certain parameter
                      e.g. group=dict(rv=1) means that the rv denoted by group one will
be assigned to this spectrum. This is convenient if for
example the same RV is assigned to a set of spectra.
:param instrumental_width: width of the instrumental profile from which the instrumental
broadening is computed in Angstrom (or any other wavelength in
which the observed spectra are calibrated). By default it
is zero.
:param hjd: Heliocentric Julian date can be assigned to each observed
spectrum.
"""
        # empty attributes that will be filled
        # by read_size
self.wmin = None
self.wmax = None
self.step = None
self.npixel = None
# pass all arguments
self.wave = wave
self.intens = intens
# lets have a look at the errors
if error is None:
warnings.warn("I found no array with errorbars of observed intensities. "
"Do not forget to assign them later!")
self.error = None
self.global_error = None
self.hasErrors = False
# sets that the spectrum is loaded
if (wave is not None) and (intens is not None):
self.loaded = True
self.read_size()
# check lengths of intens and wave
self.check_length()
# set the error
if isinstance(error, (float, int)) and error is not None:
self.error = np.ones(len(wave)) * error
self.hasErrors = True
self.global_error = error
elif error is not None:
self.error = error
self.hasErrors = True
self.global_error = None
else:
self.loaded = False
# if we provided the filename
self.filename = filename
if (not self.loaded) and (self.filename is not None):
self.read_spectrum_from_file(filename, global_error=error)
elif (not self.loaded) and (self.filename is None):
warnings.warn('No spectrum was loaded. This class is kinda useless without a spectrum. '
'I hope you know what you are doing.')
        # assigns component
self.component = component
# setup korel and check that it is proper
self.korel = korel
self.check_korel()
# setup the group
self.group = dict()
if group is not None:
self.set_group(group)
# assigns the projected slit width
self.instrumental_width = instrumental_width
# setup debug mode
self.debug = debug
# if there is hjd passed, it is assigned to the spectrum
self.hjd = kwargs.get('hjd', None)
def __str__(self):
"""
String representation of the class.
"""
string = ''
for var in ['filename', 'component', 'korel', 'loaded', 'hasErrors', 'global_error', 'group', 'hjd']:
string += "%s: %s " % (var, str(getattr(self, var)))
if self.loaded:
string += "%s: %s " % ('(min, max)', str(self.get_boundaries()))
string += '\n'
return string
def check_korel(self):
"""
If korel is set, component must be set too.
"""
if self.korel and str(self.component).lower() == 'all':
raise ValueError('In the korel regime, each spectrum must be assigned component! '
'Currently it is set to %s.' % str(self.component))
def check_length(self):
"""
Checks that wavelengths and intensities have the same length.
"""
if len(self.wave) != len(self.intens):
raise ValueError('Wavelength vector and intensity vector do not have the same length!')
def check_loaded(self):
"""
Checks that spectrum is loaded.
"""
if not self.loaded:
raise ValueError('The spectrum is not loaded.')
def free_spectrum(self):
"""
Deletes the stored spectrum.
"""
self.wave = None
self.intens = None
self.error = None
self.loaded = False
self.hasErrors = False
def get_boundaries(self):
"""
Returns the minimal and the maximal wavelength
of the spectrum.
"""
self.read_size()
return self.wmin, self.wmax
def get_group(self, param):
"""
Get defined groups for a given parameter.
:param param: the parameter
:return: returns all groups assigned to a parameter
"""
if param.lower() in self.group:
            return self.group[param.lower()]
else:
return None
def get_instrumental_width(self):
"""
Returns width of the instrumental profile.
:return:
"""
return self.instrumental_width
def get_sigma_from_continuum(self, cmin, cmax, store=True):
"""
Estimates the error of the flux from the scatter in
continuum.
:param cmin the minimal continuum value
:param cmax the maximal continuum value
:param store save the found error as an error
:return stddev the standard deviation
"""
# is the spectrum loaded ?
self.check_loaded()
        # get the part around the continuum
intens = self.get_spectrum(wmin=cmin, wmax=cmax)[1]
# get the scatter
stddev = intens.std(ddof=1)
# save it as an error
if store:
self.global_error = stddev
self.error = stddev * np.ones(len(self.wave))
return stddev
def get_sigma_from_fft(self, nlast=20, store=True):
"""
Estimates the noise using the FFT.
        :param nlast length of the FFT spectrum tail used to estimate the scatter
:param store should we save the standard deviation
"""
# check that everything is loaded
self.check_loaded()
self.read_size()
# get the linear scale
lin_wave = np.linspace(self.wmin, self.wmax, self.npixel)
# interpolate to linear scale
tck = splrep(self.wave, self.intens)
lin_intens = splev(lin_wave, tck)
        # perform the FFT
fft_intens = np.fft.fft(lin_intens)
# get absolute values
abs_fft_intens = np.absolute(fft_intens)
# get the high frequency tail
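        # (for the unshifted numpy FFT the highest frequencies lie around the middle
        # of the array, which is why the slice below is centred on len/2 - the noise
        # dominates there)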
abs_fft_intens = abs_fft_intens[len(abs_fft_intens) / 2 - nlast + 1:len(abs_fft_intens) / 2 + nlast]
# estimate the error
stddev = abs_fft_intens.std() * abs_fft_intens.mean()
        # store the value as an error if needed
if store:
self.error = stddev * np.ones(len(self.wave))
self.global_error = stddev
return stddev
def get_spectrum(self, wmin=None, wmax=None):
"""
Returns the spectrum with wavelengths wmin -> wmax
:param wmin minimal wavelength
:param wmax maximal wavelength
:return wave, intens. error (optional) - the observed spectrum,
wavelength, intensity and error (if it is given)
"""
if not self.loaded:
raise Exception('The spectrum %s has not been loaded yet!' % str(self))
else:
# the whole spectrum
if wmin is None and wmax is None:
if self.error is not None:
return self.wave.copy(), self.intens.copy(), self.error.copy()
else:
return self.wave.copy(), self.intens.copy()
else:
# corrects boundaries if needed
if wmin is None:
wmin = self.wmin
if wmax is None:
wmax = self.wmax
# What if we query too long spectrum
if (wmin-self.wmin) < -1e-6 or (wmax - self.wmax) > 1e-6:
                    raise ValueError("Queried spectral bounds (%f %f) lie outside "
"observed spectrum bounds (%f %f)." %
(wmin, wmax, self.wmin, self.wmax))
# selects the spectrum part
ind = np.where((self.wave >= wmin) & (self.wave <= wmax))[0]
if self.error is not None:
return self.wave[ind].copy(), self.intens[ind].copy(), self.error[ind].copy()
else:
return self.wave[ind].copy(), self.intens[ind].copy()
def get_wavelength(self):
"""
Returns the wavelength vector.
        OUTPUT:
self.wave.. wavelengths
"""
if not self.loaded:
raise Exception('The spectrum %s has not been loaded yet!' % str(self))
else:
return self.wave.copy()
def plot(self, ax=None, savefig=False, figname=None, **kwargs):
"""
:param figname
:param savefig
:param ax: AxesSubplot
:param kwargs:
:return:
"""
w = self.wave
i = self.intens
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
props = str({'filename': self.filename, 'component': self.component, 'korel': self.korel})
ax.plot(w, i, label=props, **kwargs)
ax.set_xlim(self.wmin, self.wmax)
ax.set_ylim(0.95*i.min(), 1.05*i.max())
ax.set_xlabel('$\lambda(\AA)$')
ax.set_ylabel('$F_{\lambda}$(rel.)')
ax.legend(fontsize=10)
# save the figure
if savefig:
if figname is None:
figname = self.filename + '.png'
# save the plot
plt.savefig(figname)
def read_size(self):
"""
        Gets the minimal wavelength, maximal wavelength
and the mean step. Linearity in wavelength is not
required.
"""
if not self.loaded:
raise Exception('The spectrum %s has not been loaded yet!' % str(self))
self.wmin = self.wave.min()
self.wmax = self.wave.max()
self.npixel = len(self.wave)
self.step = np.mean(self.wave[1:] - self.wave[:-1])
def read_spectrum_from_file(self, filename, global_error=None):
"""
Reads the spectrum from a file. Following format
is assumed: %f %f %f (wavelength, intensity, error).
        If the user does not provide errors, we still attempt
        to load the spectrum.
:param filename spectrum source file
:param global_error the error applicable to the spectrum
:return None
"""
# just in case we have already set up the global error
if global_error is None and self.global_error is not None:
global_error = self.global_error
try:
# first we try to load 3 columns, i.e with errors
self.wave, self.intens, self.error = np.loadtxt(filename, unpack=True, usecols=[0, 1, 2])
self.hasErrors = True
except:
# we failed, so we attempt to load two columns
self.wave, self.intens = np.loadtxt(filename, unpack=True, usecols=[0, 1])
# error was not set up
if global_error is None:
warnings.warn("I found no errorbars of the observed intensities in file: %s! "
"I assume they will be provided later. I remember!!" % filename)
self.hasErrors = False
self.global_error = None
# error was set up
else:
self.error = global_error * np.ones(len(self.wave))
self.hasErrors = True
self.global_error = global_error
# the spectrum is marked as loaded
self.loaded = True
# the spectrum is checked
self.check_length()
self.read_size()
def reload_spectrum(self):
"""
Reloads the spectrum.
:return:
"""
if self.loaded is False:
warnings.warn('The spectrum was not loaded, so I am not reloading, but loading... just FYI.')
if self.filename is None:
raise ValueError('There has been no spectrum given for %s' % (str(self)))
self.read_spectrum_from_file(self.filename)
def select_random_subset(self, frac):
"""
        :param frac: spectrum fraction 0.0-1.0
:return:
"""
if not self.loaded:
            raise AttributeError('Cannot select a subset. '
'The spectrum %s has not been loaded yet.' % (str(self)))
# set the newlength
newlength = int(np.ceil(frac*self.npixel))
if newlength >= self.npixel:
return
# surviving spectra indices
inds = np.sort(np.random.randint(self.npixel, size=newlength))
        # adjust the spectra
self.wave = self.wave[inds]
self.intens = self.intens[inds]
if self.error is not None:
self.error = self.error[inds]
# measure the spectrum
self.read_size()
def set_error(self, vec_error=None, global_error=None):
"""
        Sets error to the spectrum, either local or global.
:param vec_error vector error len(vec_error) = len(spectrum)
:param global_error int float error applied to the whole spectrum
"""
if vec_error is not None:
self.error = vec_error
            if len(vec_error) != self.npixel:
                raise ValueError('The length of the error vector and the length of the spectrum do not match (%s, %s)'
% (len(vec_error), str(self.npixel)))
self.hasErrors = True
self.global_error = None
if global_error is not None:
self.error = global_error * np.ones(len(self.wave))
self.hasErrors = True
self.global_error = global_error
def set_group(self, group):
"""
Sets a group to the spectrum
:param group a dictionary of pairs parameter + group
"""
# print group
for key in group.keys():
self.group[key.lower()] = group[key]
def set_spectrum_from_arrays(self, wave, intens, error):
"""
Stores the spectrum from arrays. It is assumed
that user also provides error vector.
:param wave wavelength vector
:param intens intensity vector
        :param error error vector
"""
self.wave = wave
self.intens = intens
self.error = error
self.loaded = True
self.hasErrors = True
# checking and reading
self.check_length()
self.read_size()
| 15,903 | 33.498915 | 118 | py |
davis2017-evaluation | davis2017-evaluation-master/setup.py | from setuptools import setup
import sys
if sys.version_info < (3, 6):
sys.exit('Sorry, only Python >= 3.6 is supported')
setup(
python_requires='>=3.6, <4',
install_requires=[
'Pillow>=4.1.1',
'networkx>=2.0',
'numpy>=1.12.1',
'opencv-python>=4.0.0.21',
'pandas>=0.21.1',
'pathlib2;python_version<"3.5"',
'scikit-image>=0.13.1',
'scikit-learn>=0.18',
'scipy>=1.0.0',
'tqdm>=4.28.1'
],packages=['davis2017'])
| 506 | 23.142857 | 54 | py |
davis2017-evaluation | davis2017-evaluation-master/evaluation_method.py | import os
import sys
from time import time
import argparse
import numpy as np
import pandas as pd
from davis2017.evaluation import DAVISEvaluation
default_davis_path = '/path/to/the/folder/DAVIS'
time_start = time()
parser = argparse.ArgumentParser()
parser.add_argument('--davis_path', type=str, help='Path to the DAVIS folder containing the JPEGImages, Annotations, '
'ImageSets, Annotations_unsupervised folders',
required=False, default=default_davis_path)
parser.add_argument('--set', type=str, help='Subset to evaluate the results', default='val')
parser.add_argument('--task', type=str, help='Task to evaluate the results', default='unsupervised',
choices=['semi-supervised', 'unsupervised'])
parser.add_argument('--results_path', type=str, help='Path to the folder containing the sequences folders',
required=True)
args, _ = parser.parse_known_args()
csv_name_global = f'global_results-{args.set}.csv'
csv_name_per_sequence = f'per-sequence_results-{args.set}.csv'
# Check if the method has been evaluated before, if so read the results, otherwise compute the results
csv_name_global_path = os.path.join(args.results_path, csv_name_global)
csv_name_per_sequence_path = os.path.join(args.results_path, csv_name_per_sequence)
if os.path.exists(csv_name_global_path) and os.path.exists(csv_name_per_sequence_path):
print('Using precomputed results...')
table_g = pd.read_csv(csv_name_global_path)
table_seq = pd.read_csv(csv_name_per_sequence_path)
else:
print(f'Evaluating sequences for the {args.task} task...')
# Create dataset and evaluate
dataset_eval = DAVISEvaluation(davis_root=args.davis_path, task=args.task, gt_set=args.set)
metrics_res = dataset_eval.evaluate(args.results_path)
J, F = metrics_res['J'], metrics_res['F']
# Generate dataframe for the general results
g_measures = ['J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall', 'F-Decay']
final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2.
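    # the reported J&F-Mean is simply the average of the mean region similarity J
    # (Jaccard index) and the mean boundary accuracy F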
g_res = np.array([final_mean, np.mean(J["M"]), np.mean(J["R"]), np.mean(J["D"]), np.mean(F["M"]), np.mean(F["R"]),
np.mean(F["D"])])
g_res = np.reshape(g_res, [1, len(g_res)])
table_g = pd.DataFrame(data=g_res, columns=g_measures)
with open(csv_name_global_path, 'w') as f:
table_g.to_csv(f, index=False, float_format="%.3f")
print(f'Global results saved in {csv_name_global_path}')
# Generate a dataframe for the per sequence results
seq_names = list(J['M_per_object'].keys())
seq_measures = ['Sequence', 'J-Mean', 'F-Mean']
J_per_object = [J['M_per_object'][x] for x in seq_names]
F_per_object = [F['M_per_object'][x] for x in seq_names]
table_seq = pd.DataFrame(data=list(zip(seq_names, J_per_object, F_per_object)), columns=seq_measures)
with open(csv_name_per_sequence_path, 'w') as f:
table_seq.to_csv(f, index=False, float_format="%.3f")
print(f'Per-sequence results saved in {csv_name_per_sequence_path}')
# Print the results
sys.stdout.write(f"--------------------------- Global results for {args.set} ---------------------------\n")
print(table_g.to_string(index=False))
sys.stdout.write(f"\n---------- Per sequence results for {args.set} ----------\n")
print(table_seq.to_string(index=False))
total_time = time() - time_start
sys.stdout.write('\nTotal time:' + str(total_time))
| 3,492 | 49.623188 | 118 | py |
davis2017-evaluation | davis2017-evaluation-master/evaluation_codalab.py | import sys
import os.path
from time import time
import numpy as np
import pandas
from davis2017.evaluation import DAVISEvaluation
task = 'semi-supervised'
gt_set = 'test-dev'
time_start = time()
# as per the metadata file, input and output directories are the arguments
if len(sys.argv) < 3:
input_dir = "input_dir"
output_dir = "output_dir"
debug = True
else:
[_, input_dir, output_dir] = sys.argv
debug = False
# unzipped submission data is always in the 'res' subdirectory
submission_path = os.path.join(input_dir, 'res')
if not os.path.exists(submission_path):
sys.exit('Could not find submission file {0}'.format(submission_path))
# unzipped reference data is always in the 'ref' subdirectory
gt_path = os.path.join(input_dir, 'ref')
if not os.path.exists(gt_path):
sys.exit('Could not find GT file {0}'.format(gt_path))
# Create dataset
dataset_eval = DAVISEvaluation(davis_root=gt_path, gt_set=gt_set, task=task, codalab=True)
# Check directory structure
res_subfolders = os.listdir(submission_path)
if len(res_subfolders) == 1:
sys.stdout.write(
"Incorrect folder structure, the folders of the sequences have to be placed directly inside the "
"zip.\nInside every folder of the sequences there must be an indexed PNG file for every frame.\n"
"The indexes have to match with the initial frame.\n")
sys.exit()
# Check that all sequences are there
missing = False
for seq in dataset_eval.dataset.get_sequences():
if seq not in res_subfolders:
sys.stdout.write(seq + " sequence is missing.\n")
missing = True
if missing:
sys.stdout.write(
"Verify also the folder structure, the folders of the sequences have to be placed directly inside "
"the zip.\nInside every folder of the sequences there must be an indexed PNG file for every frame.\n"
"The indexes have to match with the initial frame.\n")
sys.exit()
metrics_res = dataset_eval.evaluate(submission_path, debug=debug)
J, F = metrics_res['J'], metrics_res['F']
# Generate output to the stdout
seq_names = list(J['M_per_object'].keys())
if gt_set == "val" or gt_set == "train" or gt_set == "test-dev":
sys.stdout.write("----------------Global results in CSV---------------\n")
g_measures = ['J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall', 'F-Decay']
final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2.
g_res = np.array([final_mean, np.mean(J["M"]), np.mean(J["R"]), np.mean(J["D"]), np.mean(F["M"]), np.mean(F["R"]),
np.mean(F["D"])])
table_g = pandas.DataFrame(data=np.reshape(g_res, [1, len(g_res)]), columns=g_measures)
table_g.to_csv(sys.stdout, index=False, float_format="%0.3f")
sys.stdout.write("\n\n------------Per sequence results in CSV-------------\n")
seq_measures = ['Sequence', 'J-Mean', 'F-Mean']
J_per_object = [J['M_per_object'][x] for x in seq_names]
F_per_object = [F['M_per_object'][x] for x in seq_names]
table_seq = pandas.DataFrame(data=list(zip(seq_names, J_per_object, F_per_object)), columns=seq_measures)
table_seq.to_csv(sys.stdout, index=False, float_format="%0.3f")
# Write scores to a file named "scores.txt"
with open(os.path.join(output_dir, 'scores.txt'), 'w') as output_file:
final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2.
output_file.write("GlobalMean: %f\n" % final_mean)
output_file.write("JMean: %f\n" % np.mean(J["M"]))
output_file.write("JRecall: %f\n" % np.mean(J["R"]))
output_file.write("JDecay: %f\n" % np.mean(J["D"]))
output_file.write("FMean: %f\n" % np.mean(F["M"]))
output_file.write("FRecall: %f\n" % np.mean(F["R"]))
output_file.write("FDecay: %f\n" % np.mean(F["D"]))
total_time = time() - time_start
sys.stdout.write('\nTotal time:' + str(total_time))
| 4,122 | 42.861702 | 140 | py |
davis2017-evaluation | davis2017-evaluation-master/davis2017/utils.py | import os
import errno
import numpy as np
from PIL import Image
import warnings
from davis2017.davis import DAVIS
def _pascal_color_map(N=256, normalized=False):
"""
Python implementation of the color map function for the PASCAL VOC data set.
Official Matlab version can be found in the PASCAL VOC devkit
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit
"""
def bitget(byteval, idx):
return (byteval & (1 << idx)) != 0
dtype = 'float32' if normalized else 'uint8'
cmap = np.zeros((N, 3), dtype=dtype)
for i in range(N):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7 - j)
g = g | (bitget(c, 1) << 7 - j)
b = b | (bitget(c, 2) << 7 - j)
c = c >> 3
cmap[i] = np.array([r, g, b])
cmap = cmap / 255 if normalized else cmap
return cmap
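# Editor's illustrative sketch (not part of the original DAVIS toolkit): a sanity check of the bit
# manipulation above. The first entries of _pascal_color_map() are the canonical PASCAL VOC colors.
def _demo_pascal_color_map():
    cmap = _pascal_color_map()
    assert (cmap[0] == [0, 0, 0]).all()       # index 0: background (black)
    assert (cmap[1] == [128, 0, 0]).all()     # index 1: dark red
    assert (cmap[2] == [0, 128, 0]).all()     # index 2: dark green
    assert (cmap[3] == [128, 128, 0]).all()   # index 3: dark yellow
    return cmap[:4]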
def overlay_semantic_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None):
im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=np.int)
if im.shape[:-1] != ann.shape:
raise ValueError('First two dimensions of `im` and `ann` must match')
if im.shape[-1] != 3:
        raise ValueError('im must have three channels in the third dimension')
colors = colors or _pascal_color_map()
colors = np.asarray(colors, dtype=np.uint8)
mask = colors[ann]
fg = im * alpha + (1 - alpha) * mask
img = im.copy()
img[ann > 0] = fg[ann > 0]
if contour_thickness: # pragma: no cover
import cv2
for obj_id in np.unique(ann[ann > 0]):
contours = cv2.findContours((ann == obj_id).astype(
np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(),
contour_thickness)
return img
def generate_obj_proposals(davis_root, subset, num_proposals, save_path):
dataset = DAVIS(davis_root, subset=subset, codalab=True)
for seq in dataset.get_sequences():
save_dir = os.path.join(save_path, seq)
if os.path.exists(save_dir):
continue
all_gt_masks, all_masks_id = dataset.get_all_masks(seq, True)
img_size = all_gt_masks.shape[2:]
num_rows = int(np.ceil(np.sqrt(num_proposals)))
proposals = np.zeros((num_proposals, len(all_masks_id), *img_size))
height_slices = np.floor(np.arange(0, img_size[0] + 1, img_size[0]/num_rows)).astype(np.uint).tolist()
width_slices = np.floor(np.arange(0, img_size[1] + 1, img_size[1]/num_rows)).astype(np.uint).tolist()
ii = 0
prev_h, prev_w = 0, 0
for h in height_slices[1:]:
for w in width_slices[1:]:
proposals[ii, :, prev_h:h, prev_w:w] = 1
prev_w = w
ii += 1
if ii == num_proposals:
break
prev_h, prev_w = h, 0
if ii == num_proposals:
break
os.makedirs(save_dir, exist_ok=True)
for i, mask_id in enumerate(all_masks_id):
mask = np.sum(proposals[:, i, ...] * np.arange(1, proposals.shape[0] + 1)[:, None, None], axis=0)
save_mask(mask, os.path.join(save_dir, f'{mask_id}.png'))
def generate_random_permutation_gt_obj_proposals(davis_root, subset, save_path):
dataset = DAVIS(davis_root, subset=subset, codalab=True)
for seq in dataset.get_sequences():
gt_masks, all_masks_id = dataset.get_all_masks(seq, True)
obj_swap = np.random.permutation(np.arange(gt_masks.shape[0]))
gt_masks = gt_masks[obj_swap, ...]
save_dir = os.path.join(save_path, seq)
os.makedirs(save_dir, exist_ok=True)
for i, mask_id in enumerate(all_masks_id):
mask = np.sum(gt_masks[:, i, ...] * np.arange(1, gt_masks.shape[0] + 1)[:, None, None], axis=0)
save_mask(mask, os.path.join(save_dir, f'{mask_id}.png'))
def color_map(N=256, normalized=False):
def bitget(byteval, idx):
return ((byteval & (1 << idx)) != 0)
dtype = 'float32' if normalized else 'uint8'
cmap = np.zeros((N, 3), dtype=dtype)
for i in range(N):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7-j)
g = g | (bitget(c, 1) << 7-j)
b = b | (bitget(c, 2) << 7-j)
c = c >> 3
cmap[i] = np.array([r, g, b])
cmap = cmap/255 if normalized else cmap
return cmap
def save_mask(mask, img_path):
if np.max(mask) > 255:
raise ValueError('Maximum id pixel value is 255')
mask_img = Image.fromarray(mask.astype(np.uint8))
mask_img.putpalette(color_map().flatten().tolist())
mask_img.save(img_path)
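# Editor's illustrative sketch (not part of the original toolkit): save_mask() stores object ids as
# an indexed (palette) PNG, so viewers render the DAVIS colors while the pixel values stay the raw
# ids. The temporary output path below is made up for the demo.
def _demo_save_mask():
    import tempfile
    mask = np.array([[0, 1], [2, 3]], dtype=np.uint8)
    out_path = os.path.join(tempfile.mkdtemp(), "00000.png")
    save_mask(mask, out_path)
    reloaded = Image.open(out_path)
    assert reloaded.mode == 'P'                    # palette ("P") mode PNG
    assert (np.array(reloaded) == mask).all()      # pixel values are still the object ids
    return out_path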
def db_statistics(per_frame_values):
""" Compute mean,recall and decay from per-frame evaluation.
Arguments:
per_frame_values (ndarray): per-frame evaluation
Returns:
M,O,D (float,float,float):
return evaluation statistics: mean,recall,decay.
"""
# strip off nan values
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
M = np.nanmean(per_frame_values)
O = np.nanmean(per_frame_values > 0.5)
N_bins = 4
ids = np.round(np.linspace(1, len(per_frame_values), N_bins + 1) + 1e-10) - 1
ids = ids.astype(np.uint8)
D_bins = [per_frame_values[ids[i]:ids[i + 1] + 1] for i in range(0, 4)]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
D = np.nanmean(D_bins[0]) - np.nanmean(D_bins[3])
return M, O, D
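# Editor's illustrative sketch (not from the original toolkit): db_statistics() on a made-up
# per-frame curve. The first half of the frames score 1.0 and the second half 0.0, so the mean M
# and the recall O (fraction of frames above 0.5) are both 0.5, and the decay D (mean of the first
# quarter of frames minus mean of the last quarter) is 1.0.
def _demo_db_statistics():
    per_frame_values = np.array([1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0])
    M, O, D = db_statistics(per_frame_values)
    assert abs(M - 0.5) < 1e-6 and abs(O - 0.5) < 1e-6 and abs(D - 1.0) < 1e-6
    return M, O, D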
def list_files(dir, extension=".png"):
return [os.path.splitext(file_)[0] for file_ in os.listdir(dir) if file_.endswith(extension)]
def force_symlink(file1, file2):
try:
os.symlink(file1, file2)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(file2)
os.symlink(file1, file2)
| 6,009 | 33.342857 | 110 | py |
davis2017-evaluation | davis2017-evaluation-master/davis2017/results.py | import os
import numpy as np
from PIL import Image
import sys
class Results(object):
def __init__(self, root_dir):
self.root_dir = root_dir
def _read_mask(self, sequence, frame_id):
try:
mask_path = os.path.join(self.root_dir, sequence, f'{frame_id}.png')
return np.array(Image.open(mask_path))
except IOError as err:
sys.stdout.write(sequence + " frame %s not found!\n" % frame_id)
sys.stdout.write("The frames have to be indexed PNG files placed inside the corespondent sequence "
"folder.\nThe indexes have to match with the initial frame.\n")
sys.stderr.write("IOError: " + err.strerror + "\n")
sys.exit()
def read_masks(self, sequence, masks_id):
mask_0 = self._read_mask(sequence, masks_id[0])
masks = np.zeros((len(masks_id), *mask_0.shape))
for ii, m in enumerate(masks_id):
masks[ii, ...] = self._read_mask(sequence, m)
num_objects = int(np.max(masks))
tmp = np.ones((num_objects, *masks.shape))
tmp = tmp * np.arange(1, num_objects + 1)[:, None, None, None]
masks = (tmp == masks[None, ...]) > 0
return masks
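# Editor's illustrative sketch (not part of the original class): read_masks() loads indexed PNGs
# from disk and then splits the integer object ids into one boolean mask per object using the
# broadcasting trick below. The toy array stands in for a single decoded frame; no files are read.
def _demo_split_objects():
    masks = np.array([[[0, 1],
                       [2, 2]]])                        # 1 frame, 2x2, object ids 1 and 2
    num_objects = int(np.max(masks))
    tmp = np.ones((num_objects, *masks.shape)) * np.arange(1, num_objects + 1)[:, None, None, None]
    per_object = (tmp == masks[None, ...]) > 0          # shape (num_objects, frames, H, W)
    assert per_object[0].sum() == 1 and per_object[1].sum() == 2
    return per_object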
| 1,236 | 37.65625 | 111 | py |
davis2017-evaluation | davis2017-evaluation-master/davis2017/metrics.py | import math
import numpy as np
import cv2
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(bool)
segmentation = segmentation.astype(bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
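# Editor's illustrative sketch (not part of the original toolkit): db_eval_iou() on a pair of toy
# binary masks. The prediction covers 4 of the 6 pixels in the union, so J = 4/6.
def _demo_db_eval_iou():
    annotation = np.zeros((4, 4), dtype=bool)
    segmentation = np.zeros((4, 4), dtype=bool)
    annotation[1:3, 1:3] = True        # 2x2 ground-truth square (4 px)
    segmentation[1:3, 1:4] = True      # 2x3 prediction (6 px) fully covering the ground truth
    j = db_eval_iou(annotation, segmentation)
    assert abs(j - 4.0 / 6.0) < 1e-6   # intersection = 4, union = 6
    return j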
def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008):
assert annotation.shape == segmentation.shape
if void_pixels is not None:
assert annotation.shape == void_pixels.shape
if annotation.ndim == 3:
n_frames = annotation.shape[0]
f_res = np.zeros(n_frames)
for frame_id in range(n_frames):
void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ]
f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th)
elif annotation.ndim == 2:
f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th)
else:
raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions')
return f_res
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
    Compute the boundary F-measure between a foreground mask and a ground-truth mask.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
from skimage.morphology import disk
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F
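# Editor's illustrative sketch (not part of the original toolkit): f_measure() on two identical toy
# masks. Boundary precision and recall are both 1 when the masks match exactly, so F = 1.0.
# (The dilation step relies on cv2 and skimage, which this module already imports/uses above.)
def _demo_f_measure():
    mask = np.zeros((10, 10))
    mask[2:6, 2:6] = 1
    F = f_measure(mask, mask.copy())
    assert abs(F - 1.0) < 1e-9
    return F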
def _seg2bmap(seg, width=None, height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <dmartin@eecs.berkeley.edu>
January 2003
"""
seg = seg.astype(bool)
seg[seg > 0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h, w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
assert not (
width > w | height > h | abs(ar1 - ar2) > 0.01
), "Can" "t convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:, :-1] = seg[:, 1:]
s[:-1, :] = seg[1:, :]
se[:-1, :-1] = seg[1:, 1:]
b = seg ^ e | seg ^ s | seg ^ se
b[-1, :] = seg[-1, :] ^ e[-1, :]
b[:, -1] = seg[:, -1] ^ s[:, -1]
b[-1, -1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height, width))
for x in range(w):
for y in range(h):
if b[y, x]:
j = 1 + math.floor((y - 1) + height / h)
i = 1 + math.floor((x - 1) + width / h)
bmap[j, i] = 1
return bmap
if __name__ == '__main__':
from davis2017.davis import DAVIS
from davis2017.results import Results
dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics')
results = Results(root_dir='examples/osvos')
# Test timing F measure
for seq in dataset.get_sequences():
all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True)
all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1]
all_res_masks = results.read_masks(seq, all_masks_id)
f_metrics_res = np.zeros(all_gt_masks.shape[:2])
for ii in range(all_gt_masks.shape[0]):
f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...])
# Run using to profile code: python -m cProfile -o f_measure.prof metrics.py
# snakeviz f_measure.prof
| 6,823 | 33.464646 | 137 | py |
davis2017-evaluation | davis2017-evaluation-master/davis2017/evaluation.py | import sys
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
import numpy as np
from davis2017.davis import DAVIS
from davis2017.metrics import db_eval_boundary, db_eval_iou
from davis2017 import utils
from davis2017.results import Results
from scipy.optimize import linear_sum_assignment
class DAVISEvaluation(object):
def __init__(self, davis_root, task, gt_set, sequences='all', codalab=False):
"""
Class to evaluate DAVIS sequences from a certain set and for a certain task
:param davis_root: Path to the DAVIS folder that contains JPEGImages, Annotations, etc. folders.
        :param task: Task to compute the evaluation, choose between semi-supervised or unsupervised.
:param gt_set: Set to compute the evaluation
:param sequences: Sequences to consider for the evaluation, 'all' to use all the sequences in a set.
"""
self.davis_root = davis_root
self.task = task
self.dataset = DAVIS(root=davis_root, task=task, subset=gt_set, sequences=sequences, codalab=codalab)
@staticmethod
def _evaluate_semisupervised(all_gt_masks, all_res_masks, all_void_masks, metric):
if all_res_masks.shape[0] > all_gt_masks.shape[0]:
sys.stdout.write("\nIn your PNG files there is an index higher than the number of objects in the sequence!")
sys.exit()
elif all_res_masks.shape[0] < all_gt_masks.shape[0]:
zero_padding = np.zeros((all_gt_masks.shape[0] - all_res_masks.shape[0], *all_res_masks.shape[1:]))
all_res_masks = np.concatenate([all_res_masks, zero_padding], axis=0)
j_metrics_res, f_metrics_res = np.zeros(all_gt_masks.shape[:2]), np.zeros(all_gt_masks.shape[:2])
for ii in range(all_gt_masks.shape[0]):
if 'J' in metric:
j_metrics_res[ii, :] = db_eval_iou(all_gt_masks[ii, ...], all_res_masks[ii, ...], all_void_masks)
if 'F' in metric:
f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...], all_void_masks)
return j_metrics_res, f_metrics_res
@staticmethod
def _evaluate_unsupervised(all_gt_masks, all_res_masks, all_void_masks, metric, max_n_proposals=20):
if all_res_masks.shape[0] > max_n_proposals:
sys.stdout.write(f"\nIn your PNG files there is an index higher than the maximum number ({max_n_proposals}) of proposals allowed!")
sys.exit()
elif all_res_masks.shape[0] < all_gt_masks.shape[0]:
zero_padding = np.zeros((all_gt_masks.shape[0] - all_res_masks.shape[0], *all_res_masks.shape[1:]))
all_res_masks = np.concatenate([all_res_masks, zero_padding], axis=0)
j_metrics_res = np.zeros((all_res_masks.shape[0], all_gt_masks.shape[0], all_gt_masks.shape[1]))
f_metrics_res = np.zeros((all_res_masks.shape[0], all_gt_masks.shape[0], all_gt_masks.shape[1]))
for ii in range(all_gt_masks.shape[0]):
for jj in range(all_res_masks.shape[0]):
if 'J' in metric:
j_metrics_res[jj, ii, :] = db_eval_iou(all_gt_masks[ii, ...], all_res_masks[jj, ...], all_void_masks)
if 'F' in metric:
f_metrics_res[jj, ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[jj, ...], all_void_masks)
if 'J' in metric and 'F' in metric:
all_metrics = (np.mean(j_metrics_res, axis=2) + np.mean(f_metrics_res, axis=2)) / 2
else:
all_metrics = np.mean(j_metrics_res, axis=2) if 'J' in metric else np.mean(f_metrics_res, axis=2)
row_ind, col_ind = linear_sum_assignment(-all_metrics)
return j_metrics_res[row_ind, col_ind, :], f_metrics_res[row_ind, col_ind, :]
def evaluate(self, res_path, metric=('J', 'F'), debug=False):
metric = metric if isinstance(metric, tuple) or isinstance(metric, list) else [metric]
if 'T' in metric:
raise ValueError('Temporal metric not supported!')
if 'J' not in metric and 'F' not in metric:
raise ValueError('Metric possible values are J for IoU or F for Boundary')
# Containers
metrics_res = {}
if 'J' in metric:
metrics_res['J'] = {"M": [], "R": [], "D": [], "M_per_object": {}}
if 'F' in metric:
metrics_res['F'] = {"M": [], "R": [], "D": [], "M_per_object": {}}
# Sweep all sequences
results = Results(root_dir=res_path)
for seq in tqdm(list(self.dataset.get_sequences())):
all_gt_masks, all_void_masks, all_masks_id = self.dataset.get_all_masks(seq, True)
if self.task == 'semi-supervised':
all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1]
all_res_masks = results.read_masks(seq, all_masks_id)
if self.task == 'unsupervised':
j_metrics_res, f_metrics_res = self._evaluate_unsupervised(all_gt_masks, all_res_masks, all_void_masks, metric)
elif self.task == 'semi-supervised':
j_metrics_res, f_metrics_res = self._evaluate_semisupervised(all_gt_masks, all_res_masks, None, metric)
for ii in range(all_gt_masks.shape[0]):
seq_name = f'{seq}_{ii+1}'
if 'J' in metric:
[JM, JR, JD] = utils.db_statistics(j_metrics_res[ii])
metrics_res['J']["M"].append(JM)
metrics_res['J']["R"].append(JR)
metrics_res['J']["D"].append(JD)
metrics_res['J']["M_per_object"][seq_name] = JM
if 'F' in metric:
[FM, FR, FD] = utils.db_statistics(f_metrics_res[ii])
metrics_res['F']["M"].append(FM)
metrics_res['F']["R"].append(FR)
metrics_res['F']["D"].append(FD)
metrics_res['F']["M_per_object"][seq_name] = FM
# Show progress
if debug:
sys.stdout.write(seq + '\n')
sys.stdout.flush()
return metrics_res
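# Editor's illustrative sketch (not part of the original class): the Hungarian matching used in
# _evaluate_unsupervised() above. linear_sum_assignment minimises cost, so the per-proposal /
# per-object scores are negated to pick the assignment that maximises the mean J&F. The 2x2
# matrix below is made up.
def _demo_hungarian_matching():
    all_metrics = np.array([[0.9, 0.1],
                            [0.2, 0.8]])           # rows: proposals, cols: ground-truth objects
    row_ind, col_ind = linear_sum_assignment(-all_metrics)
    assert list(col_ind) == [0, 1]                  # proposal 0 -> object 0, proposal 1 -> object 1
    return row_ind, col_ind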
| 6,143 | 54.351351 | 143 | py |
davis2017-evaluation | davis2017-evaluation-master/davis2017/davis.py | import os
from glob import glob
from collections import defaultdict
import numpy as np
from PIL import Image
class DAVIS(object):
SUBSET_OPTIONS = ['train', 'val', 'test-dev', 'test-challenge']
TASKS = ['semi-supervised', 'unsupervised']
DATASET_WEB = 'https://davischallenge.org/davis2017/code.html'
VOID_LABEL = 255
def __init__(self, root, task='unsupervised', subset='val', sequences='all', resolution='480p', codalab=False):
"""
Class to read the DAVIS dataset
:param root: Path to the DAVIS folder that contains JPEGImages, Annotations, etc. folders.
:param task: Task to load the annotations, choose between semi-supervised or unsupervised.
:param subset: Set to load the annotations
:param sequences: Sequences to consider, 'all' to use all the sequences in a set.
        :param resolution: Specify the resolution at which to use the dataset, choose between '480p' and 'Full-Resolution'
"""
if subset not in self.SUBSET_OPTIONS:
raise ValueError(f'Subset should be in {self.SUBSET_OPTIONS}')
if task not in self.TASKS:
raise ValueError(f'The only tasks that are supported are {self.TASKS}')
self.task = task
self.subset = subset
self.root = root
self.img_path = os.path.join(self.root, 'JPEGImages', resolution)
annotations_folder = 'Annotations' if task == 'semi-supervised' else 'Annotations_unsupervised'
self.mask_path = os.path.join(self.root, annotations_folder, resolution)
year = '2019' if task == 'unsupervised' and (subset == 'test-dev' or subset == 'test-challenge') else '2017'
self.imagesets_path = os.path.join(self.root, 'ImageSets', year)
self._check_directories()
if sequences == 'all':
with open(os.path.join(self.imagesets_path, f'{self.subset}.txt'), 'r') as f:
tmp = f.readlines()
sequences_names = [x.strip() for x in tmp]
else:
sequences_names = sequences if isinstance(sequences, list) else [sequences]
self.sequences = defaultdict(dict)
for seq in sequences_names:
images = np.sort(glob(os.path.join(self.img_path, seq, '*.jpg'))).tolist()
if len(images) == 0 and not codalab:
raise FileNotFoundError(f'Images for sequence {seq} not found.')
self.sequences[seq]['images'] = images
masks = np.sort(glob(os.path.join(self.mask_path, seq, '*.png'))).tolist()
masks.extend([-1] * (len(images) - len(masks)))
self.sequences[seq]['masks'] = masks
def _check_directories(self):
if not os.path.exists(self.root):
raise FileNotFoundError(f'DAVIS not found in the specified directory, download it from {self.DATASET_WEB}')
if not os.path.exists(os.path.join(self.imagesets_path, f'{self.subset}.txt')):
raise FileNotFoundError(f'Subset sequences list for {self.subset} not found, download the missing subset '
f'for the {self.task} task from {self.DATASET_WEB}')
if self.subset in ['train', 'val'] and not os.path.exists(self.mask_path):
raise FileNotFoundError(f'Annotations folder for the {self.task} task not found, download it from {self.DATASET_WEB}')
def get_frames(self, sequence):
for img, msk in zip(self.sequences[sequence]['images'], self.sequences[sequence]['masks']):
image = np.array(Image.open(img))
mask = None if msk is None else np.array(Image.open(msk))
yield image, mask
def _get_all_elements(self, sequence, obj_type):
obj = np.array(Image.open(self.sequences[sequence][obj_type][0]))
all_objs = np.zeros((len(self.sequences[sequence][obj_type]), *obj.shape))
obj_id = []
for i, obj in enumerate(self.sequences[sequence][obj_type]):
all_objs[i, ...] = np.array(Image.open(obj))
obj_id.append(''.join(obj.split('/')[-1].split('.')[:-1]))
return all_objs, obj_id
def get_all_images(self, sequence):
return self._get_all_elements(sequence, 'images')
def get_all_masks(self, sequence, separate_objects_masks=False):
masks, masks_id = self._get_all_elements(sequence, 'masks')
masks_void = np.zeros_like(masks)
# Separate void and object masks
for i in range(masks.shape[0]):
masks_void[i, ...] = masks[i, ...] == 255
masks[i, masks[i, ...] == 255] = 0
if separate_objects_masks:
num_objects = int(np.max(masks[0, ...]))
tmp = np.ones((num_objects, *masks.shape))
tmp = tmp * np.arange(1, num_objects + 1)[:, None, None, None]
masks = (tmp == masks[None, ...])
masks = masks > 0
return masks, masks_void, masks_id
def get_sequences(self):
for seq in self.sequences:
yield seq
if __name__ == '__main__':
from matplotlib import pyplot as plt
only_first_frame = True
subsets = ['train', 'val']
for s in subsets:
dataset = DAVIS(root='/home/csergi/scratch2/Databases/DAVIS2017_private', subset=s)
for seq in dataset.get_sequences():
g = dataset.get_frames(seq)
img, mask = next(g)
plt.subplot(2, 1, 1)
plt.title(seq)
plt.imshow(img)
plt.subplot(2, 1, 2)
plt.imshow(mask)
plt.show(block=True)
| 5,514 | 43.837398 | 130 | py |
mapalignment | mapalignment-master/projects/mapalign/evaluate_funcs/evaluate_utils.py | import sys
import numpy as np
sys.path.append("../../utils")
import polygon_utils
def compute_batch_polygon_distances(gt_polygons_batch, aligned_disp_polygons_batch):
# Compute distances
distances = np.sqrt(np.sum(np.square(aligned_disp_polygons_batch - gt_polygons_batch), axis=-1))
min = np.nanmin(distances)
mean = np.nanmean(distances)
max = np.nanmax(distances)
return min, mean, max
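# Editor's illustrative sketch (not part of the original project): compute_batch_polygon_distances()
# on a made-up batch where a single vertex is displaced by (3, 4), i.e. a Euclidean distance of 5.
def _demo_compute_batch_polygon_distances():
    gt = np.zeros((1, 1, 2, 2))                    # (batch, polygon, vertex, xy)
    aligned = np.zeros((1, 1, 2, 2))
    aligned[0, 0, 0, :] = [3, 4]                   # first vertex shifted by (3, 4)
    dist_min, dist_mean, dist_max = compute_batch_polygon_distances(gt, aligned)
    assert dist_min == 0.0 and dist_max == 5.0 and abs(dist_mean - 2.5) < 1e-6
    return dist_min, dist_mean, dist_max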
def compute_threshold_accuracies(gt_vertices_batch, pred_vertices_batch, thresholds):
stripped_gt_polygons_list = []
stripped_pred_polygons_list = []
for gt_vertices, pred_vertices in zip(gt_vertices_batch, pred_vertices_batch):
for gt_polygon, pred_polygon in zip(gt_vertices, pred_vertices):
# Find first nan occurance
nan_indices = np.where(np.isnan(gt_polygon[:, 0]))[0]
if len(nan_indices):
nan_index = nan_indices[0]
if nan_index:
gt_polygon = gt_polygon[:nan_index, :]
pred_polygon = pred_polygon[:nan_index, :]
else:
# Empty polygon, break the for loop
break
gt_polygon = polygon_utils.strip_redundant_vertex(gt_polygon, epsilon=1e-3)
pred_polygon = polygon_utils.strip_redundant_vertex(pred_polygon, epsilon=1e-3)
stripped_gt_polygons_list.append(gt_polygon)
stripped_pred_polygons_list.append(pred_polygon)
if len(stripped_gt_polygons_list) == 0 or len(stripped_pred_polygons_list) == 0:
return []
stripped_gt_polygons = np.concatenate(stripped_gt_polygons_list)
stripped_pred_polygons = np.concatenate(stripped_pred_polygons_list)
distances = np.sqrt(np.sum(np.square(stripped_gt_polygons - stripped_pred_polygons), axis=-1))
# Compute thresholds count
threshold_accuracies = []
for threshold in thresholds:
accuracy = np.sum(distances <= threshold) / distances.size
threshold_accuracies.append(accuracy)
return threshold_accuracies
if __name__ == '__main__':
batch_size = 1
poly_count = 3
vertex_count = 4
gt_vertices = np.zeros((batch_size, poly_count, vertex_count, 2))
gt_vertices[0, 0, 0, :] = [1, 2]
gt_vertices[0, 0, 1, :] = [3, 4]
gt_vertices[0, 0, 2, :] = np.nan
gt_vertices[0, 1, 0, :] = np.nan
pred_vertices = np.zeros((batch_size, poly_count, vertex_count, 2))
pred_vertices[0, 0, 0, :] = [1, 2]
pred_vertices[0, 0, 1, :] = [3, 4]
pred_vertices[0, 0, 2, :] = np.nan
pred_vertices[0, 1, 0, :] = np.nan
thresholds = [1, 2, 3, 4, 5, 6, 7, 8]
threshold_accuracies = compute_threshold_accuracies(gt_vertices, pred_vertices, thresholds)
print("threshold_accuracies = {}".format(threshold_accuracies))
| 2,799 | 36.333333 | 100 | py |
mapalignment | mapalignment-master/projects/mapalign/dataset_utils/preprocess_bradbury_buildings_multires.py | import sys
import os
import json
import math
import skimage.transform
import skimage.draw
import numpy as np
# from PIL import Image, ImageDraw
# Image.MAX_IMAGE_PIXELS = 200000000
import tensorflow as tf
import config_bradbury_buildings_multires as config
sys.path.append("../../../data/bradbury_buildings_roads_height_dataset")
import read
# sys.path.append("../utils")
# import visualization
sys.path.append("../../utils")
import tf_utils
import polygon_utils
import image_utils
# import python_utils
import math_utils
import dataset_utils
# if python_utils.module_exists("matplotlib.pyplot"):
# import matplotlib.pyplot as plt
def downsample_gt_data(image, metadata, gt_polygons, normed_disp_field_maps, downsampling_factor):
# First, correct the downsampling_factor so that:
# A downsampling_factor of 1 results in a final pixel_size equal to config.REFERENCE_PIXEL_SIZE
# A downsampling_factor of 2 results in a final pixel_size equal to 2 * config.REFERENCE_PIXEL_SIZE
corrected_downsampling_factor = downsampling_factor * config.REFERENCE_PIXEL_SIZE / metadata["pixelsize"]
scale = 1 / corrected_downsampling_factor
downsampled_image = skimage.transform.rescale(image, scale, order=3, preserve_range=True, multichannel=True)
downsampled_image = downsampled_image.astype(image.dtype)
downsampled_gt_polygons = polygon_utils.rescale_polygon(gt_polygons, scale)
downsampled_normed_disp_field_maps = np.empty((normed_disp_field_maps.shape[0],
round(
normed_disp_field_maps.shape[1] / corrected_downsampling_factor),
round(
normed_disp_field_maps.shape[2] / corrected_downsampling_factor),
normed_disp_field_maps.shape[3]))
for i in range(normed_disp_field_maps.shape[0]):
downsampled_normed_disp_field_maps[i] = skimage.transform.rescale(normed_disp_field_maps[i],
scale, order=3,
preserve_range=True, multichannel=True)
return downsampled_image, downsampled_gt_polygons, downsampled_normed_disp_field_maps
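# Editor's illustrative sketch (hypothetical numbers, not values from config): the
# downsampling-factor correction at the top of downsample_gt_data(). With a reference pixel size of
# 0.3 m and an input tile at 0.15 m per pixel, a requested factor of 2 becomes a corrected factor of
# 2 * 0.3 / 0.15 = 4, so the rescaled tile ends up at 2 * 0.3 = 0.6 m per pixel as intended.
def _demo_corrected_downsampling_factor(reference_pixel_size=0.3, pixelsize=0.15, downsampling_factor=2):
    corrected = downsampling_factor * reference_pixel_size / pixelsize
    scale = 1 / corrected
    final_pixel_size = pixelsize * corrected       # equals downsampling_factor * reference_pixel_size
    return corrected, scale, final_pixel_size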
def generate_disp_data(normed_disp_field_maps, gt_polygons, disp_max_abs_value, spatial_shape):
scaled_disp_field_maps = normed_disp_field_maps * disp_max_abs_value
disp_polygons_list = polygon_utils.apply_displacement_fields_to_polygons(gt_polygons,
scaled_disp_field_maps)
disp_polygon_maps = polygon_utils.draw_polygon_maps(disp_polygons_list, spatial_shape, fill=True,
edges=True, vertices=True)
return disp_polygons_list, disp_polygon_maps
def process_sample_into_patches(patch_stride, patch_res, image, gt_polygon_map, disp_field_maps, disp_polygon_maps,
gt_polygons=None, disp_polygons_list=None):
"""
Crops all inputs to patches generated with patch_stride and patch_res
:param patch_stride:
:param patch_res:
:param image:
:param gt_polygon_map:
:param disp_field_maps:
:param disp_polygon_maps:
:param gt_polygons:
:param disp_polygons_list:
:return:
"""
include_polygons = gt_polygons is not None and disp_polygons_list is not None
patches = []
patch_boundingboxes = image_utils.compute_patch_boundingboxes(image.shape[0:2],
stride=patch_stride,
patch_res=patch_res)
# print(patch_boundingboxes)
for patch_boundingbox in patch_boundingboxes:
# Crop image
patch_image = image[patch_boundingbox[0]:patch_boundingbox[2], patch_boundingbox[1]:patch_boundingbox[3], :]
if include_polygons:
patch_gt_polygons, \
patch_disp_polygons_array = polygon_utils.prepare_polygons_for_tfrecord(gt_polygons, disp_polygons_list,
patch_boundingbox)
else:
patch_gt_polygons = patch_disp_polygons_array = None
patch_gt_polygon_map = gt_polygon_map[patch_boundingbox[0]:patch_boundingbox[2],
patch_boundingbox[1]:patch_boundingbox[3], :]
patch_disp_field_maps = disp_field_maps[:,
patch_boundingbox[0]:patch_boundingbox[2],
patch_boundingbox[1]:patch_boundingbox[3], :]
patch_disp_polygon_maps_array = disp_polygon_maps[:,
patch_boundingbox[0]:patch_boundingbox[2],
patch_boundingbox[1]:patch_boundingbox[3], :]
        # Filter out patches based on the presence of polygon corners inside the inner patch
patch_inner_res = 2 * patch_stride
patch_padding = (patch_res - patch_inner_res) // 2
inner_patch_gt_polygon_map_corners = patch_gt_polygon_map[patch_padding:-patch_padding,
patch_padding:-patch_padding, 2]
if np.sum(inner_patch_gt_polygon_map_corners) \
and (not include_polygons or (include_polygons and patch_gt_polygons is not None)):
assert patch_image.shape[0] == patch_image.shape[
1], "image should be square otherwise tile_res cannot be defined"
tile_res = patch_image.shape[0]
disp_map_count = patch_disp_polygon_maps_array.shape[0]
patches.append({
"tile_res": tile_res,
"disp_map_count": disp_map_count,
"image": patch_image,
"gt_polygons": patch_gt_polygons,
"disp_polygons": patch_disp_polygons_array,
"gt_polygon_map": patch_gt_polygon_map,
"disp_field_maps": patch_disp_field_maps,
"disp_polygon_maps": patch_disp_polygon_maps_array,
})
return patches
def save_patch_to_tfrecord(patch, shard_writer):
# print(patch["disp_field_maps"].min() / 2147483647, patch["disp_field_maps"].max() / 2147483647)
# visualization.plot_field_map("disp_field_map", patch["disp_field_maps"][0])
# Compress image into jpg
image_raw = image_utils.convert_array_to_jpg_bytes(patch["image"], mode="RGB")
gt_polygon_map_raw = patch["gt_polygon_map"].tostring() # TODO: convert to png
disp_field_maps_raw = patch["disp_field_maps"].tostring()
disp_polygon_maps_raw = patch[
"disp_polygon_maps"].tostring() # TODO: convert to png (have to handle several png images...)
if patch["gt_polygons"] is not None and patch["disp_polygons"] is not None:
gt_polygons_raw = patch["gt_polygons"].tostring()
disp_polygons_raw = patch["disp_polygons"].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'tile_res': tf_utils.int64_feature(patch["tile_res"]),
'disp_map_count': tf_utils.int64_feature(patch["disp_map_count"]),
'image': tf_utils.bytes_feature(image_raw),
'gt_polygon_count': tf_utils.int64_feature(patch["gt_polygons"].shape[0]),
'gt_polygon_length': tf_utils.int64_feature(patch["gt_polygons"].shape[1]),
'gt_polygons': tf_utils.bytes_feature(gt_polygons_raw),
'disp_polygons': tf_utils.bytes_feature(disp_polygons_raw),
'gt_polygon_map': tf_utils.bytes_feature(gt_polygon_map_raw),
'disp_field_maps': tf_utils.bytes_feature(disp_field_maps_raw),
'disp_polygon_maps': tf_utils.bytes_feature(disp_polygon_maps_raw)
}))
else:
example = tf.train.Example(features=tf.train.Features(feature={
'tile_res': tf_utils.int64_feature(patch["tile_res"]),
'disp_map_count': tf_utils.int64_feature(patch["disp_map_count"]),
'image': tf_utils.bytes_feature(image_raw),
'gt_polygon_map': tf_utils.bytes_feature(gt_polygon_map_raw),
'disp_field_maps': tf_utils.bytes_feature(disp_field_maps_raw),
'disp_polygon_maps': tf_utils.bytes_feature(disp_polygon_maps_raw),
}))
shard_writer.write(example.SerializeToString())
def process_image(dataset_raw_dirpath, image_info, overwrite_polygons_filename_extension, patch_stride, patch_res, downsampling_factors, disp_max_abs_value,
include_polygons,
downsampling_factor_writers):
"""
    Writes to all the writers (one for each resolution) all sample patches extracted from the image described by image_info.
    :param dataset_raw_dirpath:
:param image_info:
:param patch_stride:
:param patch_res:
:param downsampling_factors:
:param disp_max_abs_value:
:param include_polygons:
:param downsampling_factor_writers:
:return:
"""
ori_image, ori_metadata, ori_gt_polygons = read.load_gt_data(dataset_raw_dirpath, image_info["city"],
image_info["number"], overwrite_polygons_filename_extension=overwrite_polygons_filename_extension)
if ori_gt_polygons is None:
return False
ori_gt_polygons = polygon_utils.polygons_remove_holes(ori_gt_polygons) # TODO: Remove
# Remove redundant vertices
ori_gt_polygons = polygon_utils.simplify_polygons(ori_gt_polygons, tolerance=1)
# visualization.init_figures(["gt_data"], figsize=(60, 40))
# visualization.plot_example_polygons("gt_data", ori_image, ori_gt_polygons)
# Create displacement maps
ori_normed_disp_field_maps = math_utils.create_displacement_field_maps(ori_image.shape[:2], config.DISP_MAP_COUNT,
config.DISP_MODES,
config.DISP_GAUSS_MU_RANGE,
config.DISP_GAUSS_SIG_SCALING) # TODO: uncomment
# ori_normed_disp_field_maps = np.zeros((config.DISP_MAP_COUNT, ori_image.shape[0], ori_image.shape[1], 2)) # TODO: remove
# # TODO: remove
# np.random.seed(seed=0)
# colors = np.random.randint(0, 255, size=(len(downsampling_factors), 3), dtype=np.uint8)
for index, downsampling_factor in enumerate(downsampling_factors):
print("downsampling_factor: {}".format(downsampling_factor))
# Downsample ground-truth
image, gt_polygons, normed_disp_field_maps = downsample_gt_data(ori_image, ori_metadata, ori_gt_polygons,
ori_normed_disp_field_maps, downsampling_factor)
spatial_shape = image.shape[:2]
# Random color
# image = np.tile(colors[index], reps=[image.shape[0], image.shape[1], 1]) # TODO: remove
# Draw gt polygon map
gt_polygon_map = polygon_utils.draw_polygon_map(gt_polygons, spatial_shape, fill=True, edges=True,
vertices=True)
# Generate final displacement
disp_polygons_list, disp_polygon_maps = generate_disp_data(normed_disp_field_maps, gt_polygons,
disp_max_abs_value, spatial_shape)
# Compress data
gt_polygons = [polygon.astype(np.float16) for polygon in gt_polygons]
disp_polygons_list = [[polygon.astype(np.float16) for polygon in polygons] for polygons in disp_polygons_list]
disp_field_maps = normed_disp_field_maps * 32767 # int16 max value = 32767
disp_field_maps = np.round(disp_field_maps)
disp_field_maps = disp_field_maps.astype(np.int16)
# Cut sample into patches
if include_polygons:
patches = process_sample_into_patches(patch_stride, patch_res, image, gt_polygon_map,
disp_field_maps, disp_polygon_maps,
gt_polygons, disp_polygons_list)
else:
patches = process_sample_into_patches(patch_stride, patch_res, image, gt_polygon_map,
disp_field_maps, disp_polygon_maps)
for patch in patches:
save_patch_to_tfrecord(patch, downsampling_factor_writers[downsampling_factor])
return True
def process_dataset(dataset_fold, dataset_raw_dirpath,
patch_stride, patch_res, image_info_list, overwrite_polygons_filename_extension,
data_aug_rot,
downsampling_factors,
disp_max_abs_value):
print("Processing images from {}".format(dataset_raw_dirpath))
for image_index, image_info in enumerate(image_info_list):
image_name = read.IMAGE_NAME_FORMAT.format(city=image_info["city"], number=image_info["number"])
print("Processing city {}, number {}. Progression: {}/{}"
.format(image_info["city"], image_info["number"], image_index + 1, len(image_info_list)))
include_polygons = (dataset_fold == "val" or dataset_fold == "test")
if data_aug_rot and dataset_fold == "train":
# Account for data augmentation when rotating patches on the training set
adjusted_patch_res = math.ceil(patch_res * math.sqrt(2))
adjusted_patch_stride = math.floor(
patch_stride * math.sqrt(
2) / 2) # Divided by 2 so that no pixels are left out when rotating by 45 degrees
else:
adjusted_patch_res = patch_res
adjusted_patch_stride = patch_stride
# Filter out downsampling_factors that are lower than city_min_downsampling_factor
image_downsampling_factors = [downsampling_factor for downsampling_factor in downsampling_factors if
image_info["min_downsampling_factor"] <= downsampling_factor]
# Create writers
writers = {}
for downsampling_factor in downsampling_factors:
filename_format = os.path.join(config.TFRECORDS_DIR,
config.TFRECORD_FILEPATH_FORMAT.format(dataset_fold, image_name,
downsampling_factor))
shard_writer = dataset_utils.TFRecordShardWriter(filename_format, config.RECORDS_PER_SHARD)
writers[downsampling_factor] = shard_writer
process_image(dataset_raw_dirpath, image_info, overwrite_polygons_filename_extension,
adjusted_patch_stride, adjusted_patch_res,
image_downsampling_factors,
disp_max_abs_value,
include_polygons,
writers)
# Close writers
for downsampling_factor in downsampling_factors:
writers[downsampling_factor].close()
def save_metadata(meta_data_filepath, disp_max_abs_value, downsampling_factors):
data = {
"disp_max_abs_value": disp_max_abs_value,
"downsampling_factors": downsampling_factors,
}
with open(meta_data_filepath, 'w') as outfile:
json.dump(data, outfile)
def main():
# input("Prepare dataset, overwrites previous data. This can take a while (1h), press <Enter> to continue...")
    # Create dataset tfrecords directory if it does not exist
if not os.path.exists(config.TFRECORDS_DIR):
os.makedirs(config.TFRECORDS_DIR)
# Save meta-data
meta_data_filepath = os.path.join(config.TFRECORDS_DIR, "metadata.txt")
save_metadata(meta_data_filepath, config.DISP_MAX_ABS_VALUE,
config.DOWNSAMPLING_FACTORS)
process_dataset("train",
config.DATASET_RAW_DIRPATH,
config.TILE_STRIDE,
config.TILE_RES,
config.TRAIN_IMAGES,
config.DATASET_OVERWRITE_POLYGONS_FILENAME_EXTENSION,
config.DATA_AUG_ROT,
config.DOWNSAMPLING_FACTORS,
config.DISP_MAX_ABS_VALUE)
process_dataset("val",
config.DATASET_RAW_DIRPATH,
config.TILE_STRIDE,
config.TILE_RES,
config.VAL_IMAGES,
config.DATASET_OVERWRITE_POLYGONS_FILENAME_EXTENSION,
config.DATA_AUG_ROT,
config.DOWNSAMPLING_FACTORS,
config.DISP_MAX_ABS_VALUE)
process_dataset("test",
config.DATASET_RAW_DIRPATH,
config.TILE_STRIDE,
config.TILE_RES,
config.TEST_IMAGES,
config.DATASET_OVERWRITE_POLYGONS_FILENAME_EXTENSION,
config.DATA_AUG_ROT,
config.DOWNSAMPLING_FACTORS,
config.DISP_MAX_ABS_VALUE)
if __name__ == "__main__":
main()
| 17,281 | 47.68169 | 163 | py |
mapalignment | mapalignment-master/projects/mapalign/dataset_utils/preprocess_aerial_image_multires.py | import sys
import os
import math
import json
import random
import skimage.transform
import numpy as np
import tensorflow as tf
import config_aerial_image_multires as config
sys.path.append("../../../data/AerialImageDataset")
import read
# sys.path.append("../utils")
# import visualization
sys.path.append("../../utils")
import tf_utils
import polygon_utils
import image_utils
import python_utils
import math_utils
import dataset_utils
import print_utils
if python_utils.module_exists("matplotlib.pyplot"):
import matplotlib.pyplot as plt
def downsample_gt_data(image, gt_polygons, normed_disp_field_maps, downsampling_factor):
downsampled_image = skimage.transform.rescale(image, 1 / downsampling_factor, order=3, preserve_range=True, multichannel=True)
downsampled_image = downsampled_image.astype(image.dtype)
downsampled_gt_polygons = polygon_utils.rescale_polygon(gt_polygons, 1 / downsampling_factor)
downsampled_normed_disp_field_maps = np.empty((normed_disp_field_maps.shape[0],
round(normed_disp_field_maps.shape[1] / downsampling_factor),
round(normed_disp_field_maps.shape[2] / downsampling_factor),
normed_disp_field_maps.shape[3]))
for i in range(normed_disp_field_maps.shape[0]):
downsampled_normed_disp_field_maps[i] = skimage.transform.rescale(normed_disp_field_maps[i], 1 / downsampling_factor, order=3, preserve_range=True, multichannel=True)
return downsampled_image, downsampled_gt_polygons, downsampled_normed_disp_field_maps
def generate_disp_data(normed_disp_field_maps, gt_polygons, disp_max_abs_value, spatial_shape):
scaled_disp_field_maps = normed_disp_field_maps * disp_max_abs_value
disp_polygons_list = polygon_utils.apply_displacement_fields_to_polygons(gt_polygons,
scaled_disp_field_maps)
disp_polygon_maps = polygon_utils.draw_polygon_maps(disp_polygons_list, spatial_shape, fill=True,
edges=True, vertices=True)
return disp_polygons_list, disp_polygon_maps
def process_sample_into_patches(patch_stride, patch_res, image, gt_polygon_map, disp_field_maps, disp_polygon_maps,
gt_polygons=None, disp_polygons_list=None):
"""
Crops all inputs to patches generated with patch_stride and patch_res
:param patch_stride:
:param patch_res:
:param image:
:param gt_polygon_map:
:param disp_field_maps:
:param disp_polygon_maps:
:param gt_polygons:
:param disp_polygons_list:
:return:
"""
include_polygons = gt_polygons is not None and disp_polygons_list is not None
patches = []
patch_boundingboxes = image_utils.compute_patch_boundingboxes(image.shape[0:2],
stride=patch_stride,
patch_res=patch_res)
# print(patch_boundingboxes)
for patch_boundingbox in patch_boundingboxes:
# Crop image
patch_image = image[patch_boundingbox[0]:patch_boundingbox[2], patch_boundingbox[1]:patch_boundingbox[3], :]
if include_polygons:
patch_gt_polygons, \
patch_disp_polygons_array = polygon_utils.prepare_polygons_for_tfrecord(gt_polygons, disp_polygons_list,
patch_boundingbox)
else:
patch_gt_polygons = patch_disp_polygons_array = None
patch_gt_polygon_map = gt_polygon_map[patch_boundingbox[0]:patch_boundingbox[2],
patch_boundingbox[1]:patch_boundingbox[3], :]
patch_disp_field_maps = disp_field_maps[:,
patch_boundingbox[0]:patch_boundingbox[2],
patch_boundingbox[1]:patch_boundingbox[3], :]
patch_disp_polygon_maps_array = disp_polygon_maps[:,
patch_boundingbox[0]:patch_boundingbox[2],
patch_boundingbox[1]:patch_boundingbox[3], :]
        # Filter out patches based on the presence of polygon corners inside the inner patch
patch_inner_res = 2 * patch_stride
patch_padding = (patch_res - patch_inner_res) // 2
inner_patch_gt_polygon_map_corners = patch_gt_polygon_map[patch_padding:-patch_padding, patch_padding:-patch_padding, 2]
if np.sum(inner_patch_gt_polygon_map_corners) \
and (not include_polygons or (include_polygons and patch_gt_polygons is not None)):
assert patch_image.shape[0] == patch_image.shape[1], "image should be square otherwise tile_res cannot be defined"
tile_res = patch_image.shape[0]
disp_map_count = patch_disp_polygon_maps_array.shape[0]
patches.append({
"tile_res": tile_res,
"disp_map_count": disp_map_count,
"image": patch_image,
"gt_polygons": patch_gt_polygons,
"disp_polygons": patch_disp_polygons_array,
"gt_polygon_map": patch_gt_polygon_map,
"disp_field_maps": patch_disp_field_maps,
"disp_polygon_maps": patch_disp_polygon_maps_array,
})
return patches
def save_patch_to_tfrecord(patch, shard_writer):
# print(patch["disp_field_maps"].min() / 2147483647, patch["disp_field_maps"].max() / 2147483647)
# visualization.plot_field_map("disp_field_map", patch["disp_field_maps"][0])
# Compress image into jpg
image_raw = image_utils.convert_array_to_jpg_bytes(patch["image"], mode="RGB")
gt_polygon_map_raw = patch["gt_polygon_map"].tostring() # TODO: convert to png
disp_field_maps_raw = patch["disp_field_maps"].tostring()
disp_polygon_maps_raw = patch["disp_polygon_maps"].tostring() # TODO: convert to png (have to handle several png images...)
if patch["gt_polygons"] is not None and patch["disp_polygons"] is not None:
gt_polygons_raw = patch["gt_polygons"].tostring()
disp_polygons_raw = patch["disp_polygons"].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'tile_res': tf_utils.int64_feature(patch["tile_res"]),
'disp_map_count': tf_utils.int64_feature(patch["disp_map_count"]),
'image': tf_utils.bytes_feature(image_raw),
'gt_polygon_count': tf_utils.int64_feature(patch["gt_polygons"].shape[0]),
'gt_polygon_length': tf_utils.int64_feature(patch["gt_polygons"].shape[1]),
'gt_polygons': tf_utils.bytes_feature(gt_polygons_raw),
'disp_polygons': tf_utils.bytes_feature(disp_polygons_raw),
'gt_polygon_map': tf_utils.bytes_feature(gt_polygon_map_raw),
'disp_field_maps': tf_utils.bytes_feature(disp_field_maps_raw),
'disp_polygon_maps': tf_utils.bytes_feature(disp_polygon_maps_raw)
}))
else:
example = tf.train.Example(features=tf.train.Features(feature={
'tile_res': tf_utils.int64_feature(patch["tile_res"]),
'disp_map_count': tf_utils.int64_feature(patch["disp_map_count"]),
'image': tf_utils.bytes_feature(image_raw),
'gt_polygon_map': tf_utils.bytes_feature(gt_polygon_map_raw),
'disp_field_maps': tf_utils.bytes_feature(disp_field_maps_raw),
'disp_polygon_maps': tf_utils.bytes_feature(disp_polygon_maps_raw),
}))
shard_writer.write(example.SerializeToString())
def process_image(dataset_raw_dirpath, image_info, overwrite_polygon_dir_name, patch_stride, patch_res, downsampling_factors, disp_max_abs_value, include_polygons,
downsampling_factor_writers):
"""
    Writes to all the writers (one for each resolution) all sample patches extracted from the image described by image_info.
:param dataset_raw_dirpath:
:param image_info:
:param overwrite_polygon_dir_name:
:param patch_stride:
:param patch_res:
:param downsampling_factors:
:param disp_max_abs_value:
:param include_polygons:
:param downsampling_factor_writers:
:return:
"""
ori_image, ori_metadata, ori_gt_polygons = read.load_gt_data(dataset_raw_dirpath, image_info["city"], image_info["number"], overwrite_polygon_dir_name=overwrite_polygon_dir_name)
if ori_gt_polygons is None:
return False
# visualization.init_figures(["gt_data"], figsize=(60, 40))
# visualization.plot_example_polygons("gt_data", ori_image, ori_gt_polygons)
# Create displacement maps
ori_normed_disp_field_maps = math_utils.create_displacement_field_maps(ori_image.shape[:2], config.DISP_MAP_COUNT,
config.DISP_MODES, config.DISP_GAUSS_MU_RANGE,
config.DISP_GAUSS_SIG_SCALING) # TODO: uncomment
# ori_normed_disp_field_maps = np.zeros((config.DISP_MAP_COUNT, ori_image.shape[0], ori_image.shape[1], 2)) # TODO: remove
# # TODO: remove
# np.random.seed(seed=0)
# colors = np.random.randint(0, 255, size=(len(downsampling_factors), 3), dtype=np.uint8)
for index, downsampling_factor in enumerate(downsampling_factors):
print("downsampling_factor: {}".format(downsampling_factor))
# Downsample ground-truth
image, gt_polygons, normed_disp_field_maps = downsample_gt_data(ori_image, ori_gt_polygons, ori_normed_disp_field_maps, downsampling_factor)
spatial_shape = image.shape[:2]
# Random color
# image = np.tile(colors[index], reps=[image.shape[0], image.shape[1], 1]) # TODO: remove
# Draw gt polygon map
gt_polygon_map = polygon_utils.draw_polygon_map(gt_polygons, spatial_shape, fill=True, edges=True,
vertices=True)
# Generate final displacement
disp_polygons_list, disp_polygon_maps = generate_disp_data(normed_disp_field_maps, gt_polygons,
disp_max_abs_value, spatial_shape)
# Compress data
gt_polygons = [polygon.astype(np.float16) for polygon in gt_polygons]
disp_polygons_list = [[polygon.astype(np.float16) for polygon in polygons] for polygons in disp_polygons_list]
disp_field_maps = normed_disp_field_maps * 32767 # int16 max value = 32767
disp_field_maps = np.round(disp_field_maps)
disp_field_maps = disp_field_maps.astype(np.int16)
# Cut sample into patches
if include_polygons:
patches = process_sample_into_patches(patch_stride, patch_res, image, gt_polygon_map,
disp_field_maps, disp_polygon_maps,
gt_polygons, disp_polygons_list)
else:
patches = process_sample_into_patches(patch_stride, patch_res, image, gt_polygon_map,
disp_field_maps, disp_polygon_maps)
for patch in patches:
save_patch_to_tfrecord(patch, downsampling_factor_writers[downsampling_factor])
return True
def process_dataset(dataset_fold, dataset_raw_dirpath,
image_info_list, overwrite_polygon_dir_name, patch_stride, patch_res,
data_aug_rot,
downsampling_factors,
disp_max_abs_value):
print("Processing images from {}".format(dataset_raw_dirpath))
for image_index, image_info in enumerate(image_info_list):
print("Processing city {}. Progression: {}/{}"
.format(image_info["city"], image_index + 1, len(image_info_list)))
if "number" in image_info:
# This is one image
tile_info_list = [image_info]
elif "numbers" in image_info:
# This is multiple images
tile_info_list = [
{
"city": image_info["city"],
"number": number,
"min_downsampling_factor": image_info["min_downsampling_factor"],
}
for number in image_info["numbers"]
]
else:
print_utils.print_warning(
"WARNING: image_info dict should have one of those keys: \"number\" or \"numbers\"")
tile_info_list = []
for tile_info in tile_info_list:
image_name = read.IMAGE_NAME_FORMAT.format(city=tile_info["city"], number=tile_info["number"])
print("Processing city {}, number {}"
.format(tile_info["city"], tile_info["number"]))
include_polygons = (dataset_fold == "val" or dataset_fold == "test")
if data_aug_rot and dataset_fold == "train":
# Account for data augmentation when rotating patches on the training set
adjusted_patch_res = math.ceil(patch_res * math.sqrt(2))
adjusted_patch_stride = math.floor(
patch_stride * math.sqrt(
2) / 2) # Divided by 2 so that no pixels are left out when rotating by 45 degrees
else:
adjusted_patch_res = patch_res
adjusted_patch_stride = patch_stride
# Filter out downsampling_factors that are lower than city_min_downsampling_factor
image_downsampling_factors = [downsampling_factor for downsampling_factor in downsampling_factors if
tile_info["min_downsampling_factor"] <= downsampling_factor]
# Create writers
writers = {}
for downsampling_factor in downsampling_factors:
filename_format = os.path.join(config.TFRECORDS_DIR,
config.TFRECORD_FILEPATH_FORMAT.format(dataset_fold, image_name,
downsampling_factor))
shard_writer = dataset_utils.TFRecordShardWriter(filename_format, config.RECORDS_PER_SHARD)
writers[downsampling_factor] = shard_writer
process_image(dataset_raw_dirpath, tile_info, overwrite_polygon_dir_name,
adjusted_patch_stride, adjusted_patch_res,
image_downsampling_factors,
disp_max_abs_value,
include_polygons,
writers)
# Close writers
for downsampling_factor in downsampling_factors:
writers[downsampling_factor].close()
def save_metadata(meta_data_filepath, disp_max_abs_value, downsampling_factors):
data = {
"disp_max_abs_value": disp_max_abs_value,
"downsampling_factors": downsampling_factors,
}
with open(meta_data_filepath, 'w') as outfile:
json.dump(data, outfile)
def main():
random.seed(0)
    # Create dataset tfrecords directory if it does not exist
if not os.path.exists(config.TFRECORDS_DIR):
os.makedirs(config.TFRECORDS_DIR)
# Save meta-data
meta_data_filepath = os.path.join(config.TFRECORDS_DIR, "metadata.txt")
save_metadata(meta_data_filepath, config.DISP_MAX_ABS_VALUE, config.DOWNSAMPLING_FACTORS)
# Save data
process_dataset("train",
config.DATASET_RAW_DIRPATH,
config.TRAIN_IMAGES,
config.DATASET_OVERWRITE_POLYGON_DIR_NAME,
config.TILE_STRIDE,
config.TILE_RES,
config.DATA_AUG_ROT,
config.DOWNSAMPLING_FACTORS,
config.DISP_MAX_ABS_VALUE)
process_dataset("val",
config.DATASET_RAW_DIRPATH,
config.VAL_IMAGES,
config.DATASET_OVERWRITE_POLYGON_DIR_NAME,
config.TILE_STRIDE,
config.TILE_RES,
config.DATA_AUG_ROT,
config.DOWNSAMPLING_FACTORS,
config.DISP_MAX_ABS_VALUE)
process_dataset("test",
config.DATASET_RAW_DIRPATH,
config.TEST_IMAGES,
config.DATASET_OVERWRITE_POLYGON_DIR_NAME,
config.TILE_STRIDE,
config.TILE_RES,
config.DATA_AUG_ROT,
config.DOWNSAMPLING_FACTORS,
config.DISP_MAX_ABS_VALUE)
if __name__ == "__main__":
main()
| 16,812 | 46.360563 | 182 | py |
mapalignment | mapalignment-master/projects/mapalign/dataset_utils/dataset_multires.py | import sys
import os
import math
import tensorflow as tf
sys.path.append("../utils") # Mapalign sub-projects utils
import visualization
import skimage.io
sys.path.append("../../utils") # Projects utils
import tf_utils
import python_utils
STRING_QUEUE_CAPACITY = 4000
MIN_QUEUE_EXAMPLES = 2000
def all_items_are_integers(l):
result = True
for i in l:
if type(i) is not int:
result = False
break
return result
def get_all_shards(shard_filepath_format):
shard_filepath_list = []
shard_index = 0
stop = False
while not stop:
shard_filepath = shard_filepath_format.format(shard_index)
if os.path.exists(shard_filepath):
shard_filepath_list.append(shard_filepath)
shard_index += 1
else:
stop = True
return shard_filepath_list
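# Editor's illustrative sketch (not part of the original project): get_all_shards() simply collects
# consecutively numbered shard files until the first missing index. The temporary directory and the
# shard filename pattern below are made up for the demo.
def _demo_get_all_shards():
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    shard_format = os.path.join(tmp_dir, "sample.ds_fac_1.{:06d}.tfrecord")
    for i in range(3):
        open(shard_format.format(i), "w").close()   # create 3 empty shard files
    shards = get_all_shards(shard_format)
    assert len(shards) == 3                         # stops at the first missing index
    return shards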
def create_dataset_filename_list(tfrecords_dir_list, tfrecord_filename_format, downsampling_factors, dataset="train",
resolution_file_repeats=None):
if resolution_file_repeats is None:
resolution_file_repeats = [1] * len(downsampling_factors)
assert len(downsampling_factors) == len(resolution_file_repeats), \
"Downsampling_factors and sample_resolution_prob_weights must have the same number of elements"
assert all_items_are_integers(resolution_file_repeats), "All repeat count should be integers"
dataset_filename_list = []
for tfrecords_dir in tfrecords_dir_list:
# Find dataset dir
dataset_dir = os.path.join(tfrecords_dir, dataset)
# Find all images in dataset dir
image_dir_name_list = os.listdir(dataset_dir)
for image_dir_name in image_dir_name_list:
image_dir = os.path.join(dataset_dir, image_dir_name)
for downsampling_factor, resolution_file_repeat in zip(downsampling_factors, resolution_file_repeats):
shard_filepath_format = os.path.join(image_dir, tfrecord_filename_format.format(downsampling_factor))
shard_filepath_list = get_all_shards(shard_filepath_format)
repeated_filepaths = shard_filepath_list * resolution_file_repeat # Repeat filepaths
dataset_filename_list.extend(repeated_filepaths)
return dataset_filename_list
def rotate_poly_map(poly_map, angle):
# Apply NEAREST to corner channels and BILINEAR to the others
gt_polygon_map_area, gt_polygon_map_edges, gt_polygon_corners = tf.unstack(poly_map, axis=-1)
gt_polygon_map_area = tf.contrib.image.rotate(gt_polygon_map_area, angle, interpolation='BILINEAR')
gt_polygon_map_edges = tf.contrib.image.rotate(gt_polygon_map_edges, angle, interpolation='BILINEAR')
gt_polygon_corners = tf.contrib.image.rotate(gt_polygon_corners, angle, interpolation='NEAREST')
poly_map = tf.stack([gt_polygon_map_area, gt_polygon_map_edges, gt_polygon_corners], axis=-1)
return poly_map
def rotate_field_vectors(field_map, angle):
"""
    Just rotates every vector of field_map by angle. Does not rotate the spatial support (which is rotated in rotate_poly_map())
:param field_map:
:param angle: (in rad.)
:return:
"""
field_map_shape = tf.shape(field_map) # Save shape for later reshape
tile_resfield_map = tf.reshape(field_map, [-1, 2]) # Convert to a list of vectors
rot_mat = tf.cast(tf.stack([(tf.cos(-angle), -tf.sin(-angle)), (tf.sin(-angle), tf.cos(-angle))], axis=0),
tf.float32)
tile_resfield_map = tf.cast(tile_resfield_map, tf.float32)
tile_resfield_map = tf.matmul(tile_resfield_map, rot_mat)
tile_resfield_map = tf.reshape(tile_resfield_map,
field_map_shape) # Reshape back to field of vectors
return tile_resfield_map
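# --- Editor's illustrative sketch (not part of the original repository code) ---
# Numpy equivalent of rotate_field_vectors() above, to make the per-vector rotation
# explicit. Assumes a [height, width, 2] float array; the function name is hypothetical.
def _rotate_field_vectors_np(field_map_np, angle):
    import numpy as np
    rot_mat = np.array([[np.cos(-angle), -np.sin(-angle)],
                        [np.sin(-angle), np.cos(-angle)]], dtype=np.float32)
    flat_vectors = field_map_np.reshape(-1, 2)  # One 2D vector per pixel
    rotated = flat_vectors @ rot_mat  # Same convention as the TensorFlow implementation
    return rotated.reshape(field_map_np.shape)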
def crop_or_pad_many(image_list, res):
assert type(res) == int, "type(res) should be int"
image_batch = tf.stack(image_list, axis=0)
cropped_image_batch = tf.image.resize_image_with_crop_or_pad(image=image_batch, target_height=res, target_width=res)
cropped_image_list = tf.unstack(cropped_image_batch, axis=0)
return cropped_image_list
def corners_in_inner_patch(poly_map, patch_inner_res):
cropped_disp_polygon_map = tf.image.resize_image_with_crop_or_pad(image=poly_map,
target_height=patch_inner_res,
target_width=patch_inner_res)
_, _, disp_polygon_map_corners = tf.unstack(cropped_disp_polygon_map, axis=-1)
result = tf.cast(tf.reduce_sum(disp_polygon_map_corners), dtype=tf.bool)
return result
def field_map_flip_up_down(field_map):
field_map = tf.image.flip_up_down(field_map)
field_map_row, field_map_col = tf.unstack(field_map, axis=-1)
field_map = tf.stack([-field_map_row, field_map_col], axis=-1)
return field_map
def drop_components(polygon_map, keep_poly_prob, seed=None):
"""
Randomly removes some connected components from polygon_map (which amounts to removing some polygons).
:param polygon_map: The filtered polygon map raster
:param keep_poly_prob: Probability of a polygon to be kept
:param seed:
:return:
"""
if keep_poly_prob == 1:
# Keep all
return polygon_map
elif keep_poly_prob == 0:
# Remove all
zeroed_polygon_map_zeros = tf.zeros_like(polygon_map)
return zeroed_polygon_map_zeros
try:
with tf.name_scope('drop_components'):
# Compute connected components on the first channel of polygon_map (the polygon fill channel):
connected_components = tf.contrib.image.connected_components(polygon_map[:, :, 0])
# Get maximum component label:
connected_component_max = tf.reduce_max(connected_components)
# Randomize component labels (but keep the background label "0" the same):
connected_components_shape = tf.shape(connected_components)
connected_components = tf.reshape(connected_components, [-1])
random_values = tf.random_uniform((connected_component_max,), dtype=tf.float32,
seed=seed) # Don't draw a random number for the background label 0.
random_values = tf.pad(random_values, [[1, 0]], "CONSTANT",
constant_values=1) # Add 1 at the beginning of the array so that the background has a zero probability to be kept
connected_component_random_values = tf.gather(random_values, connected_components)
connected_component_random_values = tf.reshape(connected_component_random_values,
connected_components_shape)
# Threshold randomized components:
mask = tf.expand_dims(
tf.cast(
tf.less(connected_component_random_values, keep_poly_prob),
dtype=tf.float32
),
axis=-1)
# Filter polygon_map with mask:
mask = tf_utils.dilate(mask, filter_size=3) # Dilate to take polygon outlines inside the mask
masked_polygon_map = mask * polygon_map
return masked_polygon_map
except AttributeError:
print(
"WARNING: Tensorflow {} does not have connected_components() implemented. Keeping all components regardless of keep_poly_prob.".format(
tf.__version__))
return polygon_map
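# --- Editor's illustrative sketch (not part of the original repository code) ---
# Numpy sketch of the same polygon-dropping idea: label connected components, draw one
# random number per label and keep labels whose value falls below keep_poly_prob.
# skimage.measure.label is an assumption here; the original uses tf.contrib.image.connected_components.
def _drop_components_np(fill_channel, keep_poly_prob, rng):
    import numpy as np
    from skimage import measure
    labels = measure.label(0 < fill_channel)
    random_values = rng.random(labels.max() + 1)
    random_values[0] = 1.0  # Background label 0 is never kept
    keep_mask = random_values[labels] < keep_poly_prob
    return fill_channel * keep_mask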
def read_and_decode(tfrecord_filepaths, patch_inner_res, patch_outer_res, batch_size,
dynamic_range, disp_map_dynamic_range_fac=0.5, keep_poly_prob=None, data_aug=False, train=True,
seed=None):
"""
Reads examples from the tfrecord.
If train = True, polygon data will not be served as it cannot be shuffled easily (varying-sized tensors).
Set to False for validation and test only (where shuffling does not matter)
:param tfrecord_filepaths:
:param patch_inner_res:
:param patch_outer_res:
:param batch_size:
:param dynamic_range:
:param disp_map_dynamic_range_fac:
    :param keep_poly_prob: If not None, the probability that each polygon of disp_polygon_map is kept
:param data_aug:
:param train:
:return:
"""
assert 0 < len(tfrecord_filepaths), "tfrecord_filepaths should contain at least one element"
with tf.name_scope('read_and_decode'):
filename_queue = tf.train.string_input_producer(tfrecord_filepaths, shuffle=True, seed=seed, capacity=STRING_QUEUE_CAPACITY + 3 * batch_size)
# reader_options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP)
# reader = tf.TFRecordReader(options=reader_options)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
if train:
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since both keys are required.
features={
'tile_res': tf.FixedLenFeature([], tf.int64),
'disp_map_count': tf.FixedLenFeature([], tf.int64),
'image': tf.FixedLenFeature([], tf.string),
'gt_polygon_map': tf.FixedLenFeature([], tf.string),
'disp_field_maps': tf.FixedLenFeature([], tf.string),
'disp_polygon_maps': tf.FixedLenFeature([], tf.string)
})
disp_map_count = tf.cast(features['disp_map_count'], tf.int64)
gt_polygons = None
disp_polygons_array = None
else:
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since both keys are required.
features={
'tile_res': tf.FixedLenFeature([], tf.int64),
'disp_map_count': tf.FixedLenFeature([], tf.int64),
'image': tf.FixedLenFeature([], tf.string),
'gt_polygon_count': tf.FixedLenFeature([], tf.int64),
'gt_polygon_length': tf.FixedLenFeature([], tf.int64),
'gt_polygons': tf.FixedLenFeature([], tf.string),
'disp_polygons': tf.FixedLenFeature([], tf.string),
'gt_polygon_map': tf.FixedLenFeature([], tf.string),
'disp_field_maps': tf.FixedLenFeature([], tf.string),
'disp_polygon_maps': tf.FixedLenFeature([], tf.string)
})
disp_map_count = tf.cast(features['disp_map_count'], tf.int64)
gt_polygon_count = tf.cast(features['gt_polygon_count'], tf.int64)
gt_polygon_length = tf.cast(features['gt_polygon_length'], tf.int64)
gt_polygons_flat = tf.decode_raw(features['gt_polygons'], tf.float16)
disp_polygons_flat = tf.decode_raw(features['disp_polygons'], tf.float16)
gt_polygons_shape = tf.stack([gt_polygon_count, gt_polygon_length, 2])
gt_polygons = tf.reshape(gt_polygons_flat, gt_polygons_shape)
disp_polygons_shape = tf.stack([disp_map_count, gt_polygon_count, gt_polygon_length, 2])
disp_polygons_array = tf.reshape(disp_polygons_flat, disp_polygons_shape)
tile_res = tf.cast(features['tile_res'], tf.int64)
image_flat = tf.image.decode_jpeg(features['image']) # TODO: use dct_method="INTEGER_ACCURATE"?
gt_polygon_map_flat = tf.decode_raw(features['gt_polygon_map'], tf.uint8)
disp_field_maps_flat = tf.decode_raw(features['disp_field_maps'], tf.int16)
disp_polygon_maps_flat = tf.decode_raw(features['disp_polygon_maps'], tf.uint8)
# return image_flat, None, None, gt_polygon_map_flat, disp_field_maps_flat, disp_polygon_maps_flat
# Reshape tensors
image_shape = tf.stack([tile_res, tile_res, 3])
gt_polygon_map_shape = tf.stack([tile_res, tile_res, 3])
disp_field_maps_shape = tf.stack([disp_map_count, tile_res, tile_res, 2])
disp_polygon_maps_shape = tf.stack([disp_map_count, tile_res, tile_res, 3])
image = tf.reshape(image_flat, image_shape)
gt_polygon_map = tf.reshape(gt_polygon_map_flat, gt_polygon_map_shape)
disp_field_maps = tf.reshape(disp_field_maps_flat, disp_field_maps_shape)
disp_polygon_maps = tf.reshape(disp_polygon_maps_flat, disp_polygon_maps_shape)
# return image, None, None, gt_polygon_map, disp_field_maps, disp_polygon_maps
# Choose disp map:
disp_map_index = tf.random_uniform([], maxval=disp_map_count, dtype=tf.int64, seed=seed)
disp_polygons = None
if not train:
disp_polygons = disp_polygons_array[disp_map_index, :, :, :]
disp_field_map = disp_field_maps[disp_map_index, :, :, :]
disp_polygon_map = disp_polygon_maps[disp_map_index, :, :, :]
# return image, None, None, gt_polygon_map, tf.expand_dims(disp_field_map, axis=0), tf.expand_dims(disp_polygon_map, axis=0)
# Normalize data
image = image / 255
gt_polygon_map = gt_polygon_map / 255
disp_polygon_map = disp_polygon_map / 255
disp_field_map = disp_map_dynamic_range_fac * tf.cast(disp_field_map,
dtype=tf.float32) / 32767 # Within [-disp_map_dynamic_range_fac, disp_map_dynamic_range_fac]
if keep_poly_prob is not None:
# Remove some polygons from disp_polygon_map
disp_polygon_map = drop_components(disp_polygon_map, keep_poly_prob, seed=seed)
# return tf.expand_dims(image, axis=0), gt_polygons, disp_polygons, tf.expand_dims(gt_polygon_map, axis=0), tf.expand_dims(disp_field_map, axis=0), tf.expand_dims(disp_polygon_map, axis=0)
# Perturb image brightness, contrast, saturation, etc.
if data_aug:
image = tf.image.random_brightness(image, 0.25)
image = tf.image.random_contrast(image, 0.8, 1.2)
image = tf.image.random_saturation(image, 0.8, 1.2)
# Rotate
        if train and data_aug:  # data_aug rotation only applies to train (val includes polygons that would also have to be rotated to augment val as well)
# Pad to avoid losing parts of the image after rotation
rot_patch_outer_res = int(math.ceil(patch_outer_res * math.sqrt(2)))
rot_patch_inner_res = int(math.ceil(patch_inner_res * math.sqrt(2)))
image, gt_polygon_map, disp_polygon_map = crop_or_pad_many([image, gt_polygon_map, disp_polygon_map],
rot_patch_outer_res)
disp_field_map = tf.image.resize_image_with_crop_or_pad(
image=disp_field_map,
target_height=rot_patch_inner_res,
target_width=rot_patch_inner_res)
# Apply the rotations on the spatial support
angle = tf.random_uniform([], maxval=2 * math.pi, dtype=tf.float32, seed=seed)
image = tf.contrib.image.rotate(image, angle, interpolation='BILINEAR')
gt_polygon_map = rotate_poly_map(gt_polygon_map, angle)
disp_polygon_map = rotate_poly_map(disp_polygon_map, angle)
disp_field_map = tf.contrib.image.rotate(disp_field_map, angle, interpolation='BILINEAR')
# Rotate only the vectors for every pixel of disp_field_map
disp_field_map = rotate_field_vectors(disp_field_map, angle)
# Crop to final patch_res
# patch_outer_res = 312
image, gt_polygon_map, disp_polygon_map = crop_or_pad_many([image, gt_polygon_map, disp_polygon_map],
patch_outer_res)
disp_field_map = tf.image.resize_image_with_crop_or_pad(
image=disp_field_map,
target_height=patch_inner_res,
target_width=patch_inner_res)
# Shift dynamic range of image to be in [-1, 1]
image = image * (dynamic_range[1] - dynamic_range[0]) + dynamic_range[0]
image = tf.clip_by_value(image, dynamic_range[0], dynamic_range[1])
# return image, gt_polygons, disp_polygons, gt_polygon_map, disp_field_map, disp_polygon_map
# # Dilate polygon maps
# gt_polygon_map = tf_utils.dilate(gt_polygon_map, filter_size=2)
# disp_polygon_map = tf_utils.dilate(disp_polygon_map, filter_size=2)
if data_aug:
# Apply random flips
flip = tf.random_uniform([], dtype=tf.float16, seed=seed)
flip_outputs = tf.cond(0.5 <= flip,
lambda: (tf.image.flip_up_down(image),
tf.image.flip_up_down(gt_polygon_map),
field_map_flip_up_down(disp_field_map),
tf.image.flip_up_down(disp_polygon_map)),
lambda: (image, gt_polygon_map, disp_field_map, disp_polygon_map))
image, gt_polygon_map, disp_field_map, disp_polygon_map = flip_outputs
# Add batch dimension (to be able to use enqueue_many=True)
image = tf.expand_dims(image, 0)
if not train:
gt_polygons = tf.expand_dims(gt_polygons, 0)
disp_polygons = tf.expand_dims(disp_polygons, 0)
gt_polygon_map = tf.expand_dims(gt_polygon_map, 0)
disp_field_map = tf.expand_dims(disp_field_map, 0)
disp_polygon_map = tf.expand_dims(disp_polygon_map, 0)
# Remove patches with too little data for training (that have no corners in inner patch)
include_patch = corners_in_inner_patch(gt_polygon_map, patch_inner_res)
empty = tf.constant([], tf.int32)
if train:
image, \
gt_polygon_map, \
disp_field_map, \
disp_polygon_map = tf.cond(include_patch,
lambda: [image, gt_polygon_map,
disp_field_map, disp_polygon_map],
lambda: [tf.gather(image, empty),
tf.gather(gt_polygon_map, empty),
tf.gather(disp_field_map, empty),
tf.gather(disp_polygon_map, empty)])
else:
image, \
gt_polygons, \
disp_polygons, \
gt_polygon_map, \
disp_field_map, \
disp_polygon_map = tf.cond(include_patch,
lambda: [image, gt_polygons, disp_polygons, gt_polygon_map, disp_field_map,
disp_polygon_map],
lambda: [
tf.gather(image, empty),
tf.gather(gt_polygons, empty),
tf.gather(disp_polygons, empty),
tf.gather(gt_polygon_map, empty),
tf.gather(disp_field_map, empty),
tf.gather(disp_polygon_map, empty)])
if train:
image_batch, gt_polygon_map_batch, disp_field_map_batch, disp_polygon_map_batch = tf.train.shuffle_batch(
[image, gt_polygon_map, disp_field_map, disp_polygon_map],
batch_size=batch_size,
capacity=MIN_QUEUE_EXAMPLES + 3 * batch_size,
min_after_dequeue=MIN_QUEUE_EXAMPLES,
num_threads=8,
seed=seed,
enqueue_many=True,
allow_smaller_final_batch=False)
return image_batch, None, None, gt_polygon_map_batch, disp_field_map_batch, disp_polygon_map_batch
else:
image_batch, gt_polygons_batch, disp_polygons_batch, gt_polygon_map_batch, disp_field_map_batch, disp_polygon_map_batch = tf.train.batch(
[image, gt_polygons, disp_polygons, gt_polygon_map, disp_field_map, disp_polygon_map],
batch_size=batch_size,
num_threads=8,
dynamic_pad=True,
enqueue_many=True,
allow_smaller_final_batch=False)
return image_batch, gt_polygons_batch, disp_polygons_batch, gt_polygon_map_batch, disp_field_map_batch, disp_polygon_map_batch
def main():
seed = 0
data_dir = python_utils.choose_first_existing_path([
"/local/shared/epitome-polygon-deep-learning/data", # Try local node first
"/home/nigirard/epitome-polygon-deep-learning/data",
"/workspace/data", # Try inside Docker image
])
tfrecords_dir_list = [
# os.path.join(data_dir, "AerialImageDataset/tfrecords.mapalign.multires"),
os.path.join(data_dir, "bradbury_buildings_roads_height_dataset/tfrecords.mapalign.multires"),
# os.path.join(data_dir, "mapping_challenge_dataset/tfrecords.mapalign.multires"),
]
print("tfrecords_dir_list:")
print(tfrecords_dir_list)
# downsampling_factors = [1, 2, 4, 8]
# resolution_file_repeats = [1, 4, 16, 64]
tfrecord_filename_format = "ds_fac_{:02d}.{{:06d}}.tfrecord"
downsampling_factors = [1]
resolution_file_repeats = [1]
dataset_filename_list = create_dataset_filename_list(tfrecords_dir_list, tfrecord_filename_format,
downsampling_factors,
dataset="train",
resolution_file_repeats=resolution_file_repeats)
print("Length of dataset_filename_list:")
print(len(dataset_filename_list))
patch_outer_res = 220
patch_inner_res = 100
padding = (patch_outer_res - patch_inner_res) // 2
disp_max_abs_value = 4
batch_size = 32
dynamic_range = [-1, 1]
keep_poly_prob = 0.1 # Default: 0.1
data_aug = True
train = True
# Even when reading in multiple threads, share the filename
# queue.
image, gt_polygons, disp_polygons, gt_polygon_map, disp_field_map, disp_polygon_map = read_and_decode(
dataset_filename_list,
patch_inner_res,
patch_outer_res,
batch_size,
dynamic_range,
keep_poly_prob=keep_poly_prob,
data_aug=data_aug,
train=train,
seed=seed)
# The op for initializing the variables.
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
with tf.Session() as sess:
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
# Let's read off 3 batches just for example
for i in range(30000):
print("---- {} ---".format(i))
if train:
image_batch, gt_polygon_map_batch, disp_field_map_batch, disp_polygon_map_batch = sess.run(
[image, gt_polygon_map, disp_field_map, disp_polygon_map])
else:
image_batch, gt_polygons_batch, disp_polygons_batch, gt_polygon_map_batch, disp_field_map_batch, disp_polygon_map_batch = sess.run(
[image, gt_polygons, disp_polygons, gt_polygon_map, disp_field_map, disp_polygon_map])
print(gt_polygons_batch[0, 0, 0, :])
print(disp_polygons_batch[0, 0, 0, :])
print(gt_polygons_batch.shape)
print(disp_polygons_batch.shape)
print(image_batch.shape)
print(gt_polygon_map_batch.shape)
print(disp_field_map_batch.shape)
print(disp_polygon_map_batch.shape)
# np.set_printoptions(threshold=np.nan)
# print(image_batch)
# print(gt_polygon_map_batch)
# print(disp_field_map_batch)
# print(disp_polygon_map_batch)
print("image_batch:")
print(image_batch.min())
print(image_batch.max())
print("gt_polygon_map_batch:")
print(gt_polygon_map_batch.min())
print(gt_polygon_map_batch.max())
try:
print(disp_field_map_batch[:, :, :, 0].min())
print(disp_field_map_batch[:, :, :, 0].max())
except IndexError:
print("Skip min and max of disp_field_map_batch because of wrong rank")
# visualization.plot_field_map("disp_field_map", disp_field_map_batch[0])
print("disp_polygon_map_batch:")
print(disp_polygon_map_batch.min())
print(disp_polygon_map_batch.max())
dynamic_range = [-1, 1]
image_batch = (image_batch - dynamic_range[0]) / (
dynamic_range[1] - dynamic_range[0])
disp_field_map_batch = disp_field_map_batch * 2 # Within [-1, 1]
disp_field_map_batch = disp_field_map_batch * disp_max_abs_value # Within [-disp_max_abs_value, disp_max_abs_value]
# gt_polygon_map_batch *= 0 # TODO: Remove
# for batch_index in range(batch_size):
# if train:
# visualization.init_figures(["example"])
# # visualization.plot_example("example",
# # image_batch[batch_index],
# # gt_polygon_map_batch[batch_index],
# # disp_field_map_batch[batch_index],
# # disp_polygon_map_batch[batch_index])
# visualization.plot_example("example",
# image_batch[batch_index],
# disp_polygon_map_batch[batch_index])
# else:
# visualization.init_figures(["example", "example polygons"])
# visualization.plot_example("example",
# image_batch[batch_index],
# gt_polygon_map_batch[batch_index],
# disp_field_map_batch[batch_index],
# disp_polygon_map_batch[batch_index])
# visualization.plot_example_polygons("example polygons",
# image_batch[batch_index],
# gt_polygons_batch[batch_index],
# disp_polygons_batch[batch_index])
# input("Press <Enter> to continue...")
skimage.io.imsave("misaligned_polygon_raster.png", disp_polygon_map_batch[0])
skimage.io.imsave("image.png", image_batch[0])
disp_field_map_image = visualization.flow_to_image(disp_field_map_batch[0])
skimage.io.imsave("displacement_field_map.png", disp_field_map_image)
segmentation = gt_polygon_map_batch[0][padding:-padding, padding:-padding, :]
skimage.io.imsave("segmentation.png", segmentation)
# input("Press <Enter> to continue...")
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
main()
| 27,745 | 47.422339 | 197 | py |
mapalignment | mapalignment-master/projects/mapalign/utils/visualization.py | import os
import sys
import numpy as np
import cv2
current_filepath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(current_filepath, "../../utils"))
import python_utils
import polygon_utils
# Try importing pyplot:
display_is_available = python_utils.get_display_availability()
use_pyplot = None
if display_is_available:
if python_utils.module_exists("matplotlib.pyplot"):
# This means everything works with the default matplotlib backend
import matplotlib.pyplot as plt
use_pyplot = True
else:
# matplotlib.pyplot is just not available we cannot plot anything
use_pyplot = False
else:
# Try switching backend
import matplotlib
matplotlib.use('Agg')
if python_utils.module_exists("matplotlib.pyplot"):
# The Agg backend works, pyplot is available, we just can't display plots to the screen (they'll be saved to file anyway)
import matplotlib.pyplot as plt
use_pyplot = True
import skimage.io
print("#--- Visualization ---#")
print("display_is_available: {}".format(display_is_available))
print("use_pyplot: {}".format(use_pyplot))
def flow_to_image(flow):
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv = np.zeros((flow.shape[0], flow.shape[1], 3))
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 1] = 255
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
hsv = hsv.astype(np.uint8)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return rgb
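# --- Editor's illustrative sketch (not part of the original repository code) ---
# Minimal usage of flow_to_image(): render a displacement field to a color image and
# save it, mirroring what dataset_multires.main() does. The filename is hypothetical.
def _save_flow_visualization(flow, filepath="flow_visualization.png"):
    color_image = flow_to_image(flow)
    skimage.io.imsave(filepath, color_image)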
if use_pyplot:
FIGURE_DICT = {}
def fig_out(figure_name, shape, nonblocking):
plt.margins(0)
plt.axis('off')
axes = plt.gca()
axes.set_xlim([0, shape[1]])
axes.set_ylim([0, shape[0]])
if display_is_available:
if nonblocking:
plt.draw()
plt.pause(0.001)
else:
plt.show()
# plt.savefig("{}.png".format(figure_name), bbox_inches='tight', pad_inches=0)
plt.savefig("{}.png".format(figure_name), pad_inches=0)
def init_figures(figure_names, nonblocking=True, figsize=(4, 4)):
for i, figure_name in enumerate(figure_names):
fig = plt.figure(i, figsize=figsize)
fig.canvas.set_window_title(figure_name)
FIGURE_DICT[figure_name] = i
if nonblocking:
plt.ion()
def plot_image(image):
plt.imshow(image[:, :, :3]) # Remove extra channels if any
def plot_example(figure_name, image, gt_polygon_map, disp_field_map=None, disp_polygon_map=None, nonblocking=True):
patch_outer_res = image.shape[0]
gt_polygon_map = gt_polygon_map.astype(np.float32)
if nonblocking:
fig = plt.figure(FIGURE_DICT[figure_name])
plt.cla()
plot_image(image)
# Overlay GT polygons with 0.5 alpha
shown_gt_polygon_map = np.zeros((patch_outer_res, patch_outer_res, 4))
shown_gt_polygon_map[:, :, :3] = gt_polygon_map
shown_gt_polygon_map[:, :, 3] = np.any(gt_polygon_map, axis=-1) / 2
plt.imshow(shown_gt_polygon_map)
if disp_polygon_map is not None:
disp_polygon_map = disp_polygon_map.astype(np.float32)
disp_polygon_map /= 2
# Overlay displaced polygons with 0.5 alpha
shown_disp_polygon_map = np.zeros((patch_outer_res, patch_outer_res, 4))
shown_disp_polygon_map[:, :, :3] = disp_polygon_map
shown_disp_polygon_map[:, :, 3] = np.any(disp_polygon_map, axis=-1) / 2
plt.imshow(shown_disp_polygon_map)
# Overlay displacement map with 0.5 alpha
if disp_field_map is not None:
patch_inner_res = disp_field_map.shape[0]
patch_padding = (patch_outer_res - patch_inner_res) // 2
shown_disp_field_map_padded = np.zeros((patch_outer_res, patch_outer_res, 4))
shown_disp_field_map = np.empty_like(disp_field_map)
maxi = np.max(np.abs(disp_field_map))
shown_disp_field_map[:, :, 0] = disp_field_map[:, :, 0] / (maxi + 1e-6)
shown_disp_field_map[:, :, 1] = disp_field_map[:, :, 1] / (maxi + 1e-6)
shown_disp_field_map = (shown_disp_field_map + 1) / 2
shown_disp_field_map_padded[patch_padding:-patch_padding, patch_padding:-patch_padding, 1:3] = shown_disp_field_map
shown_disp_field_map_padded[patch_padding:-patch_padding, patch_padding:-patch_padding, 3] = 0.5
plt.imshow(shown_disp_field_map_padded)
# Draw quivers on displaced corners
if disp_polygon_map is not None:
disp_polygon_map_cropped_corners = disp_polygon_map[patch_padding:-patch_padding, patch_padding:-patch_padding, 2]
                # Select near-maximal corner pixels; if the corner channel is all zeros, no quivers are drawn
                corner_threshold = disp_polygon_map_cropped_corners.max() - 1e-1
                quiver_indexes = np.where((0 < corner_threshold) & (corner_threshold < disp_polygon_map_cropped_corners))
if len(quiver_indexes[0]) and len(quiver_indexes[1]):
disp_field_map_corners = disp_field_map[quiver_indexes[0], quiver_indexes[1], :]
plt.quiver(quiver_indexes[1] + patch_padding, quiver_indexes[0] + patch_padding, disp_field_map_corners[:, 1],
disp_field_map_corners[:, 0], scale=1, scale_units="xy", angles="xy", width=0.005, color="purple")
fig_out(figure_name, image.shape, nonblocking)
def plot_example_homography(figure_name, image, aligned_polygon_raster, misaligned_polygon_raster, nonblocking=True):
patch_res = image.shape[0]
aligned_polygon_raster = aligned_polygon_raster.astype(np.float32)
misaligned_polygon_raster = misaligned_polygon_raster.astype(np.float32)
# Darken image and gt_polygon_map
if nonblocking:
fig = plt.figure(FIGURE_DICT[figure_name])
plt.cla()
plot_image(image)
# Overlay aligned_polygon_raster with 0.5 alpha
shown_aligned_polygon_raster = np.zeros((patch_res, patch_res, 4))
shown_aligned_polygon_raster[:, :, 1] = aligned_polygon_raster[:, :, 0]
shown_aligned_polygon_raster[:, :, 3] = aligned_polygon_raster[:, :, 0] / 8
plt.imshow(shown_aligned_polygon_raster)
# Overlay misaligned_polygon_raster with 0.5 alpha
shown_misaligned_polygon_raster = np.zeros((patch_res, patch_res, 4))
shown_misaligned_polygon_raster[:, :, 0] = misaligned_polygon_raster[:, :, 0]
shown_misaligned_polygon_raster[:, :, 3] = misaligned_polygon_raster[:, :, 0] / 8
plt.imshow(shown_misaligned_polygon_raster)
fig_out(figure_name, image.shape, nonblocking)
def plot_polygons(polygons, color):
# print("plot_polygons(polygons, color)") # TODO: remove
for i, polygon in enumerate(polygons):
# Remove coordinates after nans
indexes_of_nans = np.where(np.isnan(polygon[:, 0]))[0]
if len(indexes_of_nans):
polygon_nans_crop = polygon[:indexes_of_nans[-1], :]
polygon_utils.plot_polygon(polygon_nans_crop, color=color, draw_labels=False, indexing="ij")
else:
polygon_utils.plot_polygon(polygon, color=color, draw_labels=False, indexing="ij")
# if 10 < i: # TODO: remove
# break # TODO: remove
def plot_example_polygons(figure_name, image, gt_polygons, disp_polygons=None, aligned_disp_polygons=None, nonblocking=True):
if nonblocking:
fig = plt.figure(FIGURE_DICT[figure_name])
plt.cla()
plot_image(image)
# Draw gt polygons
plot_polygons(gt_polygons, "green")
if disp_polygons is not None:
plot_polygons(disp_polygons, "red")
if aligned_disp_polygons is not None:
plot_polygons(aligned_disp_polygons, "blue")
fig_out(figure_name, image.shape, nonblocking)
def plot_seg(figure_name, image, seg, nonblocking=True):
patch_outer_res = image.shape[0]
patch_inner_res = seg.shape[0]
patch_padding = (patch_outer_res - patch_inner_res) // 2
if 3 < seg.shape[2]:
seg = seg[:, :, 1:4]
# seg = seg.astype(np.float32)
# print(seg.dtype)
# print(seg.shape)
# print(seg.min())
# print(seg.max())
if nonblocking:
fig = plt.figure(FIGURE_DICT[figure_name])
plt.cla()
plot_image(image)
# Overlay GT polygons
shown_seg = np.zeros((patch_outer_res, patch_outer_res, 4))
if 0 < patch_padding:
shown_seg[patch_padding:-patch_padding, patch_padding:-patch_padding, :3] = seg[:, :, :]
shown_seg[patch_padding:-patch_padding, patch_padding:-patch_padding, 3] = np.clip(np.sum(seg[:, :, :], axis=-1), 0, 1)
else:
shown_seg[:, :, :3] = seg[:, :, :]
shown_seg[:, :, 3] = np.clip(
np.sum(seg[:, :, :], axis=-1), 0, 1)
plt.imshow(shown_seg)
fig_out(figure_name, image.shape, nonblocking)
def plot_field_map(figure_name, field_map, nonblocking=True):
assert len(field_map.shape) == 3 and field_map.shape[2] == 2, "field_map should have 3 dimensions like so: [height, width, 2]"
from mpl_toolkits.mplot3d import Axes3D
row = np.linspace(0, 1, field_map.shape[0])
col = np.linspace(0, 1, field_map.shape[1])
rr, cc = np.meshgrid(row, col, indexing='ij')
fig = plt.figure(figsize=(18, 9))
ax = fig.add_subplot(121, projection='3d')
ax.plot_surface(rr, cc, field_map[:, :, 0], rstride=3, cstride=3, linewidth=1, antialiased=True)
ax = fig.add_subplot(122, projection='3d')
ax.plot_surface(rr, cc, field_map[:, :, 1], rstride=3, cstride=3, linewidth=1, antialiased=True)
plt.savefig("{}.png".format(figure_name), pad_inches=0)
else:
def init_figures(figure_names, nonblocking=True):
print("Graphical interface (matplotlib.pyplot) is not available. Will print out relevant values instead of "
"plotting.")
def plot_example(figure_name, image, gt_polygon_map, disp_field_map, disp_polygon_map, nonblocking=True):
print(figure_name)
def plot_example_homography(figure_name, image, aligned_polygon_raster, misaligned_polygon_raster,
nonblocking=True):
print(figure_name)
def plot_example_polygons(figure_name, image, gt_polygons, disp_polygons, aligned_disp_polygons=None, nonblocking=True):
print(figure_name)
# print("gt_polygons:")
# print(gt_polygons)
# print("aligned_disp_polygons:")
# print(aligned_disp_polygons)
def plot_seg(figure_name, image, seg, nonblocking=True):
print(figure_name)
def plot_batch(figure_names, image_batch, gt_polygon_map_batch, disp_field_map_batches, disp_polygon_map_batch, nonblocking=True):
assert len(figure_names) == len(disp_field_map_batches)
# batch_size = gt_polygon_map_batch.shape[0]
# index = random.randrange(batch_size)
index = 0
for figure_name, disp_field_map_batch in zip(figure_names, disp_field_map_batches):
plot_example(figure_name, image_batch[index], gt_polygon_map_batch[index], disp_field_map_batch[index], disp_polygon_map_batch[index], nonblocking=nonblocking)
def plot_batch_polygons(figure_name, image_batch, gt_polygons_batch, disp_polygons_batch, aligned_disp_polygons_batch, nonblocking=True):
# batch_size = image_batch.shape[0]
# index = random.randrange(batch_size)
index = 0
plot_example_polygons(figure_name, image_batch[index], gt_polygons_batch[index], disp_polygons_batch[index], aligned_disp_polygons_batch[index], nonblocking=nonblocking)
def plot_batch_seg(figure_name, image_batch, seg_batch):
# batch_size = image_batch.shape[0]
# index = random.randrange(batch_size)
index = 0
plot_seg(figure_name, image_batch[index], seg_batch[index])
def save_plot_image_polygons(filepath, ori_image, ori_gt_polygons, disp_polygons, aligned_disp_polygons, line_width=1):
spatial_shape = ori_image.shape[:2]
ori_gt_polygons_map = polygon_utils.draw_polygon_map(ori_gt_polygons, spatial_shape, fill=False, edges=True,
vertices=False, line_width=line_width)
disp_polygons_map = polygon_utils.draw_polygon_map(disp_polygons, spatial_shape, fill=False, edges=True,
vertices=False, line_width=line_width)
aligned_disp_polygons_map = polygon_utils.draw_polygon_map(aligned_disp_polygons, spatial_shape, fill=False,
edges=True, vertices=False, line_width=line_width)
output_image = ori_image[:, :, :3] # Keep first 3 channels
output_image = output_image.astype(np.float64)
output_image[np.where(0 < ori_gt_polygons_map[:, :, 0])] = np.array([0, 255, 0])
output_image[np.where(0 < disp_polygons_map[:, :, 0])] = np.array([255, 0, 0])
output_image[np.where(0 < aligned_disp_polygons_map[:, :, 0])] = np.array([0, 0, 255])
# output_image = np.clip(output_image, 0, 255)
output_image = output_image.astype(np.uint8)
skimage.io.imsave(filepath, output_image)
def save_plot_segmentation_image(filepath, segmentation_image):
output_image = np.zeros((segmentation_image.shape[0], segmentation_image.shape[1], 4))
output_image[:, :, :3] = segmentation_image[:, :, 1:4] # Remove background channel
output_image[:, :, 3] = np.sum(segmentation_image[:, :, 1:4], axis=-1) # Add alpha
output_image = output_image * 255
output_image = np.clip(output_image, 0, 255)
output_image = output_image.astype(np.uint8)
skimage.io.imsave(filepath, output_image)
| 13,891 | 40.717718 | 173 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/main.py | # Quickly align the OSM data of your images with this script
import sys
import os
import argparse
import skimage.io
import numpy as np
import test
sys.path.append("../../utils")
import run_utils
import print_utils
import geo_utils
CONFIG = "config"
IMAGE = "geo_images/test_image.tif"
SHAPEFILE = None
BATCH_SIZE = 12
RUNS_DIRPATH = "runs.igarss2019" # Best models: runs.igarss2019
# Should be in descending order:
DS_FAC_LIST = [
8,
4,
2,
1,
]
RUN_NAME_FORMAT = "ds_fac_{}_inria_bradbury_all_2" # Best models: ds_fac_{}_inria_bradbury_all_2
def get_args():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-c', '--config',
default=CONFIG,
type=str,
help='Name of the config file, excluding the .json file extension.')
argparser.add_argument(
'-i', '--image',
default=IMAGE,
type=str,
help='Filepath to the GeoTIFF image.')
argparser.add_argument(
'-s', '--shapefile',
default=SHAPEFILE,
type=str,
help='Filepath to the shapefile.')
argparser.add_argument(
'-b', '--batch_size',
default=BATCH_SIZE,
type=int,
help='Batch size. Generally set as large as the VRAM can handle. Default value can be set in config file.')
argparser.add_argument(
'-r', '--runs_dirpath',
default=RUNS_DIRPATH,
type=str,
help='Name of directory where the models can be found.')
argparser.add_argument(
'-d', '--ds_fac',
default=DS_FAC_LIST,
type=int,
nargs='+',
help='Downscaling factors. Should be a list of descending integers. Used to retrieve run names')
argparser.add_argument(
'--pixelsize',
type=float,
help='Set pixel size (in meters) of the image. Useful when the image does not have this value in its metadata.')
args = argparser.parse_args()
return args
def read_image(filepath, pixelsize=None):
image_array = skimage.io.imread(filepath)
if pixelsize is None:
pixelsize = geo_utils.get_pixelsize(filepath)
assert type(pixelsize) == float, "pixelsize should be float, not {}".format(type(pixelsize))
if pixelsize < 1e-3:
print_utils.print_warning("WARNING: pixel size of image is detected to be {}m which seems very small to be correct. "
"If problems occur specify pixelsize with the pixelsize command-line argument".format(pixelsize))
image_metadata = {
"filepath": filepath,
"pixelsize": pixelsize,
}
return image_array, image_metadata
def normalize(image, mu=None, sigma=None):
if mu is None:
mu = np.mean(image)
if sigma is None:
sigma = np.std(image)
return (image - mu) / sigma
def get_osm_annotations(filepath):
filename_no_extension = os.path.splitext(filepath)[0]
npy_filepath = filename_no_extension + ".npy"
if os.path.exists(npy_filepath):
print_utils.print_info("Loading OSM building data from disc...")
gt_polygons = np.load(npy_filepath, allow_pickle=True)
else:
print_utils.print_info("Fetching OSM building data from the internet...")
gt_polygons = geo_utils.get_polygons_from_osm(filepath, tag="building")
# Save npy to avoid re-fetching:
np.save(npy_filepath, gt_polygons)
# Save shapefile for visualisation:
shp_filepath = filename_no_extension + ".shp"
geo_utils.save_shapefile_from_polygons(gt_polygons, filepath, shp_filepath)
return gt_polygons
def get_shapefile_annotations(image_filepath, shapefile_filepath):
polygons, _ = geo_utils.get_polygons_from_shapefile(image_filepath, shapefile_filepath)
return polygons
def save_annotations(image_filepath, polygons):
filename_no_extension = os.path.splitext(image_filepath)[0]
npy_filepath = filename_no_extension + ".aligned.npy"
shp_filepath = filename_no_extension + ".aligned.shp"
np.save(npy_filepath, polygons)
geo_utils.save_shapefile_from_polygons(polygons, image_filepath, shp_filepath)
def get_abs_path(filepath):
working_dir = os.path.dirname(os.path.abspath(__file__))
if os.path.isabs(filepath):
abs_path = filepath
else:
abs_path = os.path.join(working_dir, filepath)
return abs_path
def print_hist(hist):
print("hist:")
for (bin, count) in zip(hist[1], hist[0]):
print("{}: {}".format(bin, count))
def clip_image(image, min, max):
image = np.maximum(np.minimum(image, max), min)
return image
def get_min_max(image, std_factor=2):
mu = np.mean(image, axis=(0, 1))
std = np.std(image, axis=(0, 1))
min = mu - std_factor * std
max = mu + std_factor * std
return min, max
def stretch_image(image, min, max, target_min, target_max):
image = (image - min) / (max - min)
image = image * (target_max - target_min) + target_min
return image
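# --- Editor's illustrative sketch (not part of the original repository code) ---
# The normalization chain used in main(): clip to [0, 255], compute a robust min/max
# from mean +/- std_factor * std, stretch back to [0, 255] and clip again.
def _example_stretch(image, std_factor=3):
    image = clip_image(image, 0, 255)
    im_min, im_max = get_min_max(image, std_factor=std_factor)
    image = stretch_image(image, im_min, im_max, 0, 255)
    return clip_image(image, 0, 255)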
def check_polygons_in_image(image, polygons):
"""
Allows some vertices to be outside the image. Return s true if at least 1 is inside.
:param image:
:param polygons:
:return:
"""
height = image.shape[0]
width = image.shape[1]
min_i = min([polygon[:, 0].min() for polygon in polygons])
min_j = min([polygon[:, 1].min() for polygon in polygons])
max_i = max([polygon[:, 0].max() for polygon in polygons])
max_j = max([polygon[:, 1].max() for polygon in polygons])
return not (max_i < 0 or height < min_i or max_j < 0 or width < min_j)
def main():
args = get_args()
config = run_utils.load_config(args.config)
if config is None:
print_utils.print_error(
"ERROR: cannot continue without a config file. Exiting now...")
exit()
print_utils.print_info("Using downscaling factors: {}".format(args.ds_fac))
run_name_list = [RUN_NAME_FORMAT.format(ds_fac) for ds_fac in args.ds_fac]
print_utils.print_info("Reading image...")
image_filepath = get_abs_path(args.image)
image, image_metadata = read_image(image_filepath, args.pixelsize)
image = clip_image(image, 0, 255)
# hist = np.histogram(image)
# print_hist(hist)
im_min, im_max = get_min_max(image, std_factor=3)
# print("min: {}, max: {}".format(im_min, im_max))
image = stretch_image(image, im_min, im_max, 0, 255)
image = clip_image(image, 0, 255)
# hist = np.histogram(image)
# print_hist(hist)
print("Image stats:")
print("\tShape: {}".format(image.shape))
print("\tMin: {}".format(image.min()))
print("\tMax: {}".format(image.max()))
if args.shapefile is not None:
shapefile_filepath = get_abs_path(args.shapefile)
gt_polygons = get_shapefile_annotations(image_filepath, shapefile_filepath)
else:
gt_polygons = get_osm_annotations(image_filepath)
print("Polygons stats:")
print("\tCount: {}".format(len(gt_polygons)))
print("\tMin: {}".format(min([polygon.min() for polygon in gt_polygons])))
print("\tMax: {}".format(max([polygon.max() for polygon in gt_polygons])))
if not check_polygons_in_image(image, gt_polygons):
print_utils.print_error("ERROR: polygons are not inside the image. This is most likely due to using the wrong projection when reading the input shapefile. Aborting...")
exit()
print_utils.print_info("Aligning building annotations...")
aligned_polygons = test.test_align_gt(args.runs_dirpath, image, image_metadata, gt_polygons, args.batch_size,
args.ds_fac, run_name_list, config["disp_max_abs_value"],
output_shapefiles=False)
print_utils.print_info("Saving aligned building annotations...")
save_annotations(args.image, aligned_polygons)
if __name__ == '__main__':
main()
| 8,129 | 31.390438 | 176 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/5_model_buildings.py | import os.path
import numpy as np
import bpy, bmesh
import config
OUTPUT_BASE_DIRPATH = os.path.join(config.PROJECT_DIR, "3d_buildings/leibnitz")
SCALE = 0.1
IMAGE_HEIGHT = 12360 * 0.5 # In meters
IMAGE_WIDTH = 17184 * 0.5 # In meters
UV_SCALE = (1 / (IMAGE_HEIGHT * SCALE), 1 / (IMAGE_WIDTH * SCALE)) # (u, v)
def build_buildings(polygon_list, heights):
bm = bmesh.new()
uv_layer = bm.loops.layers.uv.new()
for index, (polygon, height) in enumerate(zip(polygon_list, heights)):
if index % 1000 == 0:
print("Progress: {}/{}".format(index + 1, len(polygon_list)))
verts = []
for p in polygon:
vert = bm.verts.new((p[1], - p[0], 0))
verts.append(vert)
face = bm.faces.new(verts)
for p, loop in zip(polygon, face.loops):
loop[uv_layer].uv = (p[1] * UV_SCALE[0], 1 - p[0] * UV_SCALE[1])
# Extrude by height
r = bmesh.ops.extrude_discrete_faces(bm, faces=[face])
bmesh.ops.translate(bm, vec=(0, 0, height), verts=r['faces'][0].verts)
bm.normal_update()
me = bpy.data.meshes.new("polygon")
bm.to_mesh(me)
ob = bpy.data.objects.new("building", me)
bpy.context.scene.objects.link(ob)
bpy.context.scene.update()
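# --- Editor's illustrative sketch (not part of the original repository code) ---
# Calling build_buildings() directly with two hypothetical square footprints
# (coordinates and heights in the same scaled units as the arrays loaded below).
# Only meaningful when run inside Blender, since build_buildings() relies on bpy/bmesh.
def _example_build_two_buildings():
    footprints = [np.array([[0.0, 0.0], [0.0, 10.0], [10.0, 10.0], [10.0, 0.0]]),
                  np.array([[20.0, 0.0], [20.0, 8.0], [28.0, 8.0], [28.0, 0.0]])]
    heights = np.array([12.0, 6.0])
    build_buildings(footprints, heights)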
# Load building footprints
polygon_list = np.load(os.path.join(OUTPUT_BASE_DIRPATH, "polygons.npy"))
scaled_polygon_list = [SCALE * polygon for polygon in polygon_list]
heights = np.load(os.path.join(OUTPUT_BASE_DIRPATH, "heights.npy"))
scaled_heights = SCALE * heights
# Build each building one at a time
print("# --- Starting to build buildings: --- #")
build_buildings(scaled_polygon_list, scaled_heights)
print("# --- Finished building buildings --- #")
| 1,792 | 27.460317 | 79 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/model_utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
import tensorflow as tf
sys.path.append("../../utils")
import tf_utils
import print_utils
DEBUG = False
SUMMARY = False
def print_debug(obj):
if DEBUG:
print_utils.print_debug(obj.__str__())
def conv_conv_pool(input_, n_filters, name="", pool=True, activation=tf.nn.elu, weight_decay=None,
dropout_keep_prob=None):
"""{Conv -> BN -> RELU}x2 -> {Pool, optional}
Args:
input_ (4-D Tensor): (batch_size, H, W, C)
n_filters (list): number of filters [int, int]
training (1-D Tensor): Boolean Tensor
name (str): name postfix
pool (bool): If True, MaxPool2D
activation: Activation function
weight_decay: Weight decay rate
Returns:
net: output of the Convolution operations
pool (optional): output of the max pooling operations
"""
net = input_
with tf.variable_scope("layer_{}".format(name)):
for i, F in enumerate(n_filters):
net = tf_utils.complete_conv2d(net, F, (3, 3), padding="VALID", activation=activation,
bias_init_value=-0.01,
weight_decay=weight_decay,
summary=SUMMARY)
if pool is False:
return net, None
else:
pool = tf.layers.max_pooling2d(net, (2, 2), strides=(2, 2), name="pool_{}".format(name))
return net, pool
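# --- Editor's illustrative sketch (not part of the original repository code) ---
# Stacking two conv_conv_pool blocks the way build_input_branch() below does,
# doubling the feature count at each level. Feature counts are hypothetical.
def _example_conv_stack(input_tensor, weight_decay=None):
    conv_0, pool_0 = conv_conv_pool(input_tensor, [32, 32], name="example_0", weight_decay=weight_decay)
    conv_1, _ = conv_conv_pool(pool_0, [64, 64], name="example_1", pool=False, weight_decay=weight_decay)
    return conv_0, conv_1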
def upsample_crop_concat(to_upsample, input_to_crop, size=(2, 2), weight_decay=None, name=None):
"""Upsample `to_upsample`, crop to match resolution of `input_to_crop` and concat the two.
Args:
input_A (4-D Tensor): (N, H, W, C)
input_to_crop (4-D Tensor): (N, 2*H + padding, 2*W + padding, C2)
size (tuple): (height_multiplier, width_multiplier) (default: (2, 2))
name (str): name of the concat operation (default: None)
Returns:
output (4-D Tensor): (N, size[0]*H, size[1]*W, 2*C2)
"""
H, W, _ = to_upsample.get_shape().as_list()[1:]
_, _, target_C = input_to_crop.get_shape().as_list()[1:]
H_multi, W_multi = size
target_H = H * H_multi
target_W = W * W_multi
upsample = tf.image.resize_bilinear(to_upsample, (target_H, target_W), name="upsample_{}".format(name))
upsample = tf_utils.complete_conv2d(upsample, target_C, (3, 3), padding="SAME", bias_init_value=-0.01,
weight_decay=weight_decay,
summary=SUMMARY)
# TODO: initialize upsample with bilinear weights
# upsample = tf.layers.conv2d_transpose(to_upsample, target_C, kernel_size=2, strides=1, padding="valid", name="deconv{}".format(name))
crop = tf.image.resize_image_with_crop_or_pad(input_to_crop, target_H, target_W)
return tf.concat([upsample, crop], axis=-1, name="concat_{}".format(name))
def upsample_crop(input, resolution, factor=(2, 2), name=None):
"""
Scales the input displacement field map by factor.
First upsamples by factor,
then crops to resolution.
:param input: Tensor to upsample and then crop
:param resolution: Output resolution (row_count, col_count)
:param factor: Factor of scaling (row_factor, col_factor)
:param name: Name of op
:return: Upsampled + cropped tensor
"""
# Upsample
up_size = (input.shape[1] * factor[0], input.shape[2] * factor[1])
input_upsampled = tf.image.resize_bilinear(input, up_size, name="upsample_{}".format(name))
# Crop
input_cropped = tf.image.resize_image_with_crop_or_pad(input_upsampled, resolution[0], resolution[1])
return input_cropped
def build_input_branch(input, feature_base_count, pool_count, name="", weight_decay=None):
res_levels = pool_count + 1
with tf.variable_scope(name):
print_debug(name)
levels = []
for res_level_index in range(res_levels):
print_debug("\tlevel {}:".format(res_level_index))
feature_count = feature_base_count * math.pow(2, res_level_index)
if res_level_index == 0:
# Add first level
conv, pool = conv_conv_pool(input, [feature_count, feature_count],
name="conv_pool_{}".format(res_level_index), weight_decay=weight_decay)
elif res_level_index < res_levels - 1:
# Add all other levels (except the last one)
level_input = levels[-1][1] # Select the previous pool
conv, pool = conv_conv_pool(level_input, [feature_count, feature_count],
name="conv_pool_{}".format(res_level_index), weight_decay=weight_decay)
elif res_level_index == res_levels - 1:
# Add last level
level_input = levels[-1][1] # Select the previous pool
conv, pool = conv_conv_pool(level_input, [feature_count, feature_count],
name="conv_pool_{}".format(res_level_index), pool=False,
weight_decay=weight_decay)
else:
print("WARNING: Should be impossible to get here!")
conv = pool = None
print_debug("\t\tconv: {}".format(conv))
print_debug("\t\tpool: {}".format(pool))
levels.append((conv, pool))
return levels
def build_common_part(branch_levels_list, feature_base_count,
name="", weight_decay=None):
"""
Merges the two branches level by level in a U-Net fashion
:param branch_levels_list:
:param feature_base_count:
:param name:
:param weight_decay:
:return:
"""
res_levels = len(branch_levels_list[0])
with tf.variable_scope(name):
print_debug(name)
# Concat branches at each level + add conv layers
levels = []
for level_index in range(res_levels):
print_debug("\tlevel {}:".format(level_index))
concat_a_b = tf.concat([branch_levels[level_index][0] for branch_levels in branch_levels_list], axis=-1,
name="concat_a_b_{}".format(level_index))
print_debug("\t\tconcat_a_b: {}".format(concat_a_b))
feature_count = feature_base_count * math.pow(2, level_index)
concat_a_b_conv, _ = conv_conv_pool(concat_a_b, [feature_count, feature_count],
name="concat_a_b_conv{}".format(level_index), pool=False,
weight_decay=weight_decay)
print_debug("\t\tconcat_a_b_conv: {}".format(concat_a_b_conv))
levels.append(concat_a_b_conv)
return levels
def build_output_branch(input_levels, feature_base_count, name="", weight_decay=None):
with tf.variable_scope(name):
print_debug(name)
res_levels = len(input_levels)
prev_level_output = None
output_levels = []
for level_index in range(res_levels - 1, -1, -1):
print_debug("\tlevel {}:".format(level_index))
if prev_level_output is None:
# This means we are at the bottom of the "U" of the U-Net
prev_level_output = input_levels[level_index]
else:
# Now concat prev_level_output with current input level
up = upsample_crop_concat(prev_level_output, input_levels[level_index], weight_decay=weight_decay,
name="up_{}".format(level_index))
print_debug("\t\tup: {}".format(up))
feature_count = feature_base_count * math.pow(2, level_index)
final_conv, _ = conv_conv_pool(up, [feature_count, feature_count],
name="final_conv_{}".format(level_index), pool=False,
weight_decay=weight_decay)
print_debug("\t\tfinal_conv: {}".format(final_conv))
output_levels.insert(0, final_conv) # Insert at the beginning because we are iterating in reverse order
prev_level_output = final_conv
return output_levels
def build_pred_branch(input_levels, output_channels, name=""):
with tf.variable_scope(name):
print_debug(name)
output_levels = []
output_level_0 = None
level_0_resolution = None
for level_index, input in enumerate(input_levels):
print_debug("\tlevel {}:".format(level_index))
# Add prediction layer then upsample prediction to match level 0's prediction resolution
pred = tf.layers.conv2d(input, output_channels, (1, 1), name="pred_conv1x1_level_{}".format(level_index),
padding='VALID')
tf.summary.histogram("pred_{}".format(level_index), pred)
print_debug("\t\tpred: {}".format(pred))
if level_index == 0:
output_level_0 = pred
level_0_resolution = pred.get_shape().as_list()[1:3]
else:
# Upsample pred and crop to the resolution of the first level
single_factor = math.pow(2, level_index)
pred = upsample_crop(pred, level_0_resolution, (single_factor, single_factor),
name="convert_disp_pred_{}".format(level_index))
output_levels.append(pred)
stacked_output_levels = tf.stack(output_levels, axis=1, name="stacked_preds")
print_debug("\tstacked_output_levels: {}".format(stacked_output_levels))
return output_level_0, stacked_output_levels
def build_multibranch_unet(input_branch_params_list, pool_count, common_feature_base_count, output_branch_params_list,
weight_decay=None):
"""
    Builds a multi-branch U-Net network. Has len(input_branch_params_list) input branches and len(output_branch_params_list) output branches.
:param input_branch_params_list: [
{
"tensor": input Tensorflow tensor,
"name": name used in internal scope of the graph,
"feature_base_count": number of features of the first conv for the each input branch. Multiplied by 2 after each conv_conv block,
},
...
]
:param pool_count: number of 2x2 pooling operations. Results in (pool_count+1) resolution levels
:param common_feature_base_count: number of features of the first conv for the common part of the network. Multiplied by 2 after each conv_conv block
:param output_branch_params_list: [
{
"feature_base_count": like input feature_base_counts but for outputs,
"channel_count": integer for the final channel count,
"activation": final activation function,
"name": name used in internal scope of the graph,
},
...
]
:param weight_decay: (Default: None). Weight decay rate
:return: output heads, keep_prob (not used)
"""
# Dropout - controls the complexity of the model, prevents co-adaptation of features.
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
# Build the separate simple convolution networks for each input:
input_branch_levels_list = []
for params in input_branch_params_list:
tf.summary.histogram("input_{}".format(params["name"]), params["tensor"])
branch_levels = build_input_branch(params["tensor"], params["feature_base_count"], pool_count,
name="branch_{}".format(params["name"]),
weight_decay=weight_decay)
input_branch_levels_list.append(branch_levels)
    # Build the common part of the network, concatenating input branches at all levels
common_part_levels = build_common_part(input_branch_levels_list,
common_feature_base_count,
name="common_part",
weight_decay=weight_decay)
# Build the splitting part of the network, each level (except the last one) finishing with output branches.
# Each branch is like the upsampling part of a U-Net
outputs = []
for params in output_branch_params_list:
branch_levels = build_output_branch(common_part_levels,
params["feature_base_count"],
name="branch_{}".format(params["name"]),
weight_decay=weight_decay)
# Add the last layers for prediction, then upsample each levels' prediction to level 0's resolution
# TODO: keep this for legacy reasons:
if params["activation"] == tf.identity:
name = "branch_{}_pred_logit".format(params["name"])
else:
name = "branch_{}_pred".format(params["name"])
level_0_pred, stacked_pred_logits = build_pred_branch(branch_levels,
output_channels=params["channel_count"],
name=name)
# Apply activation function to logits
stacked_preds = params["activation"](stacked_pred_logits)
output = (stacked_pred_logits, stacked_preds, stacked_preds[:, 0, ...])
outputs.append(output)
return outputs, keep_prob
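# --- Editor's illustrative sketch (not part of the original repository code) ---
# How the parameter dicts of build_multibranch_unet() fit together: one branch per input
# tensor and one branch per output head. Shapes, feature counts and channel counts are hypothetical.
def _example_build_multibranch_unet():
    input_image = tf.placeholder(tf.float32, [None, 220, 220, 3])
    input_poly_map = tf.placeholder(tf.float32, [None, 220, 220, 3])
    input_branch_params_list = [
        {"tensor": input_image, "name": "image", "feature_base_count": 16},
        {"tensor": input_poly_map, "name": "poly_map", "feature_base_count": 16},
    ]
    output_branch_params_list = [
        {"name": "disp", "feature_base_count": 32, "channel_count": 2, "activation": tf.nn.tanh},
        {"name": "seg", "feature_base_count": 32, "channel_count": 4, "activation": tf.identity},
    ]
    outputs, keep_prob = build_multibranch_unet(input_branch_params_list, pool_count=3,
                                                common_feature_base_count=32,
                                                output_branch_params_list=output_branch_params_list)
    return outputs, keep_prob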
def build_double_unet(input_image, input_poly_map,
image_feature_base_count, poly_map_feature_base_count, common_feature_base_count, pool_count,
disp_output_channels, add_seg_output=True, seg_output_channels=1,
weight_decay=None):
"""
Build the double U-Net network. Has two input branches and two output branches (actually, each resolution level
except the last one have two output branches).
:param input_image: image
:param input_poly_map: polygon_map
:param image_feature_base_count: number of features of the first conv for the image branch. Multiplied by 2 after each conv_conv block
:param poly_map_feature_base_count: number of features of the first conv for the polygon map branch. Multiplied by 2 after each conv_conv block
:param common_feature_base_count: number of features of the first conv for the common part of the network. Multiplied by 2 after each conv_conv block
:param pool_count: number of 2x2 pooling operations. Results in (pool_count+1) resolution levels
:param disp_output_channels: Output dimension for the displacement prediction
:param add_seg_output: (Default: True). If True, a segmentation output branch is built. If False, no additional branch is built and the seg_output_channels argument is ignored.
:param seg_output_channels: Output dimension for the segmentation prediction
:param weight_decay: (Default: None). Weight decay rate
:return: Network
"""
# Dropout - controls the complexity of the model, prevents co-adaptation of features.
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
tf.summary.histogram("input_image", input_image)
tf.summary.histogram("input_poly_map", input_poly_map)
# Build the two separate simple convolution networks for each input
branch_image_levels = build_input_branch(input_image, image_feature_base_count, pool_count,
name="branch_image",
weight_decay=weight_decay)
branch_poly_map_levels = build_input_branch(input_poly_map, poly_map_feature_base_count, pool_count,
name="branch_poly_map",
weight_decay=weight_decay)
# Build the common part of the network, concatenating the image and polygon map branches at all levels
common_part_levels = build_common_part([branch_image_levels, branch_poly_map_levels],
common_feature_base_count,
name="common_part",
weight_decay=weight_decay)
# Build the splitting part of the network, each level (except the last one) finishing with two branches: one for
# displacement map prediction and the other for segmentation prediction. Each branch is like the upsampling part of
    # a U-Net.
disp_levels = build_output_branch(common_part_levels,
common_feature_base_count,
name="branch_disp",
weight_decay=weight_decay)
if add_seg_output:
seg_levels = build_output_branch(common_part_levels,
common_feature_base_count,
name="branch_seg",
weight_decay=weight_decay)
else:
seg_levels = None
# Add the last layers for prediction, then upsample each levels' prediction to level 0's resolution
level_0_disp_pred_logit, stacked_disp_pred_logits = build_pred_branch(disp_levels,
output_channels=disp_output_channels,
name="branch_disp_pred")
level_0_disp_pred = tf.nn.tanh(level_0_disp_pred_logit)
stacked_disp_preds = tf.nn.tanh(stacked_disp_pred_logits)
if add_seg_output:
level_0_seg_pred_logit, stacked_seg_pred_logits = build_pred_branch(seg_levels,
output_channels=seg_output_channels,
name="branch_seg_pred_logit")
# Apply sigmoid to level_0_seg_pred_logit
level_0_seg_pred = tf.nn.sigmoid(level_0_seg_pred_logit)
else:
stacked_seg_pred_logits = None
level_0_seg_pred = None
return level_0_disp_pred, stacked_disp_preds, level_0_seg_pred, stacked_seg_pred_logits, keep_prob
def get_output_res(input_res, pool_count):
"""
This function has to be re-written if the model architecture changes
:param input_res:
:param pool_count:
:return:
"""
current_res = input_res
non_zero_remainder = False
# branch_image
for i in range(pool_count):
current_res -= 4 # 2 conv3x3
current_res, r = divmod(current_res, 2) # pool
non_zero_remainder = non_zero_remainder or bool(r)
current_res -= 4 # 2 conv3x3 of the last layer
# common_part
current_res -= 4 # 2 conv3x3
# branch_disp
for i in range(pool_count):
current_res *= 2 # upsample
current_res -= 4 # 2 conv3x3
if non_zero_remainder:
print(
"WARNING: a pooling operation will result in a non integer res, the network will automatically add padding there. The output of this function is not garanteed to be exact.")
return int(current_res), non_zero_remainder
def get_input_res(output_res, pool_count):
"""
This function has to be re-written if the model architecture changes
:param output_res:
:param pool_count:
:return:
"""
current_res = output_res
non_zero_remainder = False
# branch_disp
for i in range(pool_count):
current_res += 4 # 2 conv3x3
current_res, r = divmod(current_res, 2) # upsample
non_zero_remainder = non_zero_remainder or bool(r)
# common_part
current_res += 4 # 2 conv3x3
# branch_image
current_res += 4 # 2 conv3x3 of the last layer
for i in range(pool_count):
current_res *= 2 # pool
current_res += 4 # 2 conv3x3
return int(current_res), non_zero_remainder
def get_min_input_res(pool_count):
"""
Returns the minimum input resolution the network can handle.
    Because convolutions use no padding, the resolution of the output is smaller than the input and
    thus there is a minimum input resolution that works.
This function has to be re-written if the model architecture changes
:param pool_count:
:return:
"""
min_input_res = None
output_res = 0
non_zero_remainder = True
while non_zero_remainder:
output_res += 1
min_input_res, non_zero_remainder = get_input_res(output_res, pool_count)
return min_input_res
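# --- Editor's illustrative sketch (not part of the original repository code) ---
# Sanity-checking patch resolutions for the no-padding architecture: the minimum usable
# input resolution and the input/output round trip for a hypothetical 220px input.
def _example_resolutions(pool_count=3):
    min_input_res = get_min_input_res(pool_count)
    output_res, inexact = get_output_res(220, pool_count)
    input_res, _ = get_input_res(output_res, pool_count)
    return min_input_res, output_res, input_res, inexact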
| 20,710 | 44.820796 | 185 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/loss_utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
sys.path.append("../../utils")
import tf_utils
def displacement_error(gt, preds, level_loss_coefs, polygon_map, disp_loss_params):
"""
    :param gt: Groundtruth displacement map bounded between -1 and 1. Shape [batch, height, width, channels (2)]
:param preds: Predicted displacement maps bounded between -1 and 1. Shape [batch, levels, height, width, channels (2)]
:param level_loss_coefs: Loss coefficients to apply to each level
:param polygon_map: Used as mask for fill, outline and vertex. Shape [batch, height, width, channels (3)]
:return: error
"""
height, width, _ = gt.get_shape().as_list()[1:]
with tf.name_scope("euclidean_error"):
# Compute weight mask
cropped_polygon_map = tf.image.resize_image_with_crop_or_pad(polygon_map, height, width)
# TODO: normalize correction_weights
correction_weights = 1 / (
tf.reduce_sum(tf.reduce_sum(cropped_polygon_map, axis=1), axis=1) + tf.keras.backend.epsilon())
weigths = tf.constant(
[disp_loss_params["fill_coef"], disp_loss_params["edge_coef"], disp_loss_params["vertex_coef"]],
dtype=tf.float32)
corrected_weights = weigths * correction_weights
corrected_weights = tf.expand_dims(tf.expand_dims(corrected_weights, axis=1), axis=1)
weighted_mask = tf.reduce_sum(cropped_polygon_map * corrected_weights, axis=-1)
weighted_mask = tf.expand_dims(weighted_mask, axis=1) # Add levels dimension
# Compute errors
gt = tf.expand_dims(gt, axis=1) # Add levels dimension
pixelwise_euclidean_error = tf.reduce_sum(tf.square(gt - preds), axis=-1)
masked_pixelwise_euclidean_error = pixelwise_euclidean_error * weighted_mask
# Sum errors
summed_error = tf.reduce_sum(masked_pixelwise_euclidean_error, axis=0) # Batch sum
summed_error = tf.reduce_sum(summed_error, axis=-1) # Col/Width sum
summed_error = tf.reduce_sum(summed_error, axis=-1) # Row/Height sum
summed_error = summed_error * level_loss_coefs # Apply Level loss coefficients
summed_error = tf.reduce_sum(summed_error)
# Sum weights
summed_weighted_mask = tf.reduce_sum(weighted_mask)
loss = summed_error / (summed_weighted_mask + tf.keras.backend.epsilon())
return loss
def segmentation_error(seg_gt, seg_pred_logits, level_loss_coefs, seg_loss_params):
"""
:param seg_gt:
:param seg_pred_logits:
    :param level_loss_coefs: Loss coefficients to apply to each level
    :param seg_loss_params: Dict of per-class coefficients ("background_coef", "fill_coef", "edge_coef", "vertex_coef")
    :return:
"""
_, levels, height, width, _ = seg_pred_logits.get_shape().as_list()
# Crop seg_gt to match resolution of seg_pred_logits
seg_gt = tf.image.resize_image_with_crop_or_pad(seg_gt, height, width)
# Add background class to gt segmentation
if tf_utils.get_tf_version() == "1.4.0":
seg_gt_bg = tf.reduce_prod(1 - seg_gt, axis=-1,
keep_dims=True) # Equals 0 if pixel is either fill, outline or vertex. Equals 1 otherwise
else:
seg_gt_bg = tf.reduce_prod(1 - seg_gt, axis=-1,
keepdims=True) # Equals 0 if pixel is either fill, outline or vertex. Equals 1 otherwise
seg_gt = tf.concat([seg_gt_bg, seg_gt], axis=-1)
# Compute weight mask
# class_sums = tf.reduce_sum(tf.reduce_sum(seg_gt, axis=1), axis=1)
# seg_class_balance_weights = 1 / (
# class_sums + tf.keras.backend.epsilon())
seg_class_weights = tf.constant([[seg_loss_params["background_coef"], seg_loss_params["fill_coef"],
seg_loss_params["edge_coef"], seg_loss_params["vertex_coef"]]],
dtype=tf.float32)
# balanced_class_weights = seg_class_balance_weights * seg_class_weights
balanced_class_weights = seg_class_weights
balanced_class_weights = tf.expand_dims(balanced_class_weights, axis=1) # Add levels dimension
balanced_class_weights = tf.tile(balanced_class_weights, multiples=[1, levels, 1]) # Repeat on levels dimension
level_loss_coefs = tf.expand_dims(level_loss_coefs, axis=-1) # Add channels dimension
final_weights = balanced_class_weights * level_loss_coefs
final_weights = tf.expand_dims(tf.expand_dims(final_weights, axis=2), axis=2) # Add spatial dimensions
# Adapt seg_gt shape to seg_pred_logits
seg_gt = tf.expand_dims(seg_gt, axis=1) # Add levels dimension
seg_gt = tf.tile(seg_gt, multiples=[1, levels, 1, 1, 1]) # Add levels dimension
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=seg_gt, logits=seg_pred_logits)
# Now apply the various weights
weighted_loss = loss * final_weights
final_loss = tf.reduce_mean(weighted_loss)
return final_loss
def laplacian_penalty(preds, level_loss_coefs):
in_channels = preds.shape[-1]
with tf.name_scope("laplacian_penalty"):
laplace_k = tf_utils.make_depthwise_kernel([[0.5, 1.0, 0.5],
[1.0, -6., 1.0],
[0.5, 1.0, 0.5]], in_channels)
# Reshape preds to respect the input format of the depthwise_conv2d op
shape = [preds.shape[0] * preds.shape[1]] + preds.get_shape().as_list()[2:]
reshaped_preds = tf.reshape(preds, shape)
laplacians = tf.nn.depthwise_conv2d(reshaped_preds, laplace_k, [1, 1, 1, 1], padding='SAME')
penalty_map = tf.reduce_sum(tf.square(laplacians), axis=-1)
# Reshape penalty_map to shape compatible with preds
shape = preds.get_shape().as_list()[:-1]
reshaped_penalty_map = tf.reshape(penalty_map, shape)
# Compute mean penalty per level over spatial dimension as well as over batches
level_penalties = tf.reduce_mean(reshaped_penalty_map, axis=0) # Batch mean
level_penalties = tf.reduce_mean(level_penalties, axis=-1) # Col/Width mean
level_penalties = tf.reduce_mean(level_penalties, axis=-1) # Row/Height mean
# Apply level_loss_coefs
weighted_penalties = level_penalties * level_loss_coefs
penalty = tf.reduce_mean(weighted_penalties) # Levels mean
return penalty
def main(_):
batch_size = 1
levels = 2
patch_inner_res = 3
patch_outer_res = 5
disp_ = tf.placeholder(tf.float32, [batch_size, patch_inner_res, patch_inner_res, 2])
disps = tf.placeholder(tf.float32, [batch_size, levels, patch_inner_res, patch_inner_res, 2])
seg_ = tf.placeholder(tf.float32, [batch_size, patch_inner_res, patch_inner_res, 3])
seg_logits = tf.placeholder(tf.float32, [batch_size, levels, patch_inner_res, patch_inner_res, 3])
level_loss_coefs = tf.placeholder(tf.float32, [levels])
mask = tf.placeholder(tf.float32, [batch_size, patch_outer_res, patch_outer_res, 3])
    # Example loss coefficients (in training these are read from the config file)
    disp_loss_params = {"fill_coef": 1.0, "edge_coef": 1.0, "vertex_coef": 1.0}
    seg_loss_params = {"background_coef": 1.0, "fill_coef": 1.0, "edge_coef": 1.0, "vertex_coef": 1.0}
    disp_loss = displacement_error(disp_, disps, level_loss_coefs, mask, disp_loss_params)
    seg_loss = segmentation_error(seg_, seg_logits, level_loss_coefs, seg_loss_params)
    penalty = laplacian_penalty(disps, level_loss_coefs)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
with tf.Session() as sess:
sess.run(init_op)
disp_gt = np.zeros([batch_size, patch_inner_res, patch_inner_res, 2])
disp_gt[0, 0, 0, 0] = 1
disp_preds = np.zeros([batch_size, levels, patch_inner_res, patch_inner_res, 2])
disp_preds[0, 0, 0, 0, 0] = 1
disp_preds[0, 1, 0, 0, 0] = 1
seg_gt = np.zeros([batch_size, patch_inner_res, patch_inner_res, 3])
# seg_gt += 0.5
seg_gt[0, 0, 0, 0] = 1.0
seg_gt[0, 0, 1, 1] = 1.0
seg_gt[0, 0, 2, 2] = 1.0
seg_gt[0, 1, 0, 0] = 1.0
seg_gt[0, 1, 1, 1] = 1.0
seg_gt[0, 1, 2, 2] = 1.0
seg_pred_logits = np.zeros([batch_size, levels, patch_inner_res, patch_inner_res, 3])
seg_pred_logits += -100
seg_pred_logits[0, 0, 0, 0, 0] = 100
seg_pred_logits[0, 0, 0, 1, 1] = 100
seg_pred_logits[0, 0, 0, 2, 2] = -100
seg_pred_logits[0, 1, 0, 0, 0] = 100
seg_pred_logits[0, 1, 0, 1, 1] = 100
seg_pred_logits[0, 1, 0, 2, 2] = -100
seg_pred_logits[0, 0, 1, 0, 0] = 100
seg_pred_logits[0, 0, 1, 1, 1] = 100
seg_pred_logits[0, 0, 1, 2, 2] = -100
seg_pred_logits[0, 1, 1, 0, 0] = 100
seg_pred_logits[0, 1, 1, 1, 1] = 100
seg_pred_logits[0, 1, 1, 2, 2] = -100
coefs = np.array([1, 0.5])
poly_mask = np.zeros([batch_size, patch_outer_res, patch_outer_res, 3])
poly_mask[0, 1, 1, 0] = 1
computed_disp_loss, computed_seg_loss, computed_penalty = sess.run(
[disp_loss, seg_loss, penalty], feed_dict={disp_: disp_gt, disps: disp_preds,
seg_: seg_gt, seg_logits: seg_pred_logits,
level_loss_coefs: coefs, mask: poly_mask})
print("computed_disp_loss:")
print(computed_disp_loss)
print("computed_seg_loss:")
print(computed_seg_loss)
print("computed_penalty:")
print(computed_penalty)
if __name__ == '__main__':
tf.app.run(main=main)
| 9,420 | 44.73301 | 125 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/multires_pipeline.py | import sys
import skimage.transform
import skimage.io
import numpy as np
import model
sys.path.append("../../utils")
import run_utils
import polygon_utils
import print_utils
def rescale_data(image, polygons, scale):
downsampled_image = skimage.transform.rescale(image, scale, order=3, preserve_range=True, multichannel=True, anti_aliasing=True)
downsampled_image = downsampled_image.astype(image.dtype)
downsampled_polygons = polygon_utils.rescale_polygon(polygons, scale)
return downsampled_image, downsampled_polygons
def downsample_data(image, metadata, polygons, factor, reference_pixel_size):
corrected_factor = factor * reference_pixel_size / metadata["pixelsize"]
scale = 1 / corrected_factor
downsampled_image, downsampled_polygons = rescale_data(image, polygons, scale)
return downsampled_image, downsampled_polygons
def upsample_data(image, metadata, polygons, factor, reference_pixel_size):
# TODO: test with metadata["pixelsize"] != config.REFERENCE_PIXEL_SIZE
corrected_factor = factor * reference_pixel_size / metadata["pixelsize"]
upsampled_image, upsampled_polygons = rescale_data(image, polygons, corrected_factor)
return upsampled_image, upsampled_polygons
def inference(runs_dirpath, ori_image, ori_metadata, ori_disp_polygons, model_disp_max_abs_value, batch_size, scale_factor, run_name):
# Setup run dir and load config file
run_dir = run_utils.setup_run_dir(runs_dirpath, run_name)
_, checkpoints_dir = run_utils.setup_run_subdirs(run_dir)
config = run_utils.load_config(config_dirpath=run_dir)
# Downsample
image, disp_polygons = downsample_data(ori_image, ori_metadata, ori_disp_polygons, scale_factor, config["reference_pixel_size"])
spatial_shape = image.shape[:2]
# Draw displaced polygon map
# disp_polygons_to_rasterize = []
disp_polygons_to_rasterize = disp_polygons
disp_polygon_map = polygon_utils.draw_polygon_map(disp_polygons_to_rasterize, spatial_shape, fill=True, edges=True,
vertices=True)
# Compute output_res
output_res = model.MapAlignModel.get_output_res(config["input_res"], config["pool_count"])
# print("output_res: {}".format(output_res))
map_align_model = model.MapAlignModel(config["model_name"], config["input_res"],
config["add_image_input"], config["image_channel_count"],
config["image_feature_base_count"],
config["add_poly_map_input"], config["poly_map_channel_count"],
config["poly_map_feature_base_count"],
config["common_feature_base_count"], config["pool_count"],
config["add_disp_output"], config["disp_channel_count"],
config["add_seg_output"], config["seg_channel_count"],
output_res,
batch_size,
config["loss_params"],
config["level_loss_coefs_params"],
config["learning_rate_params"],
config["weight_decay"],
config["image_dynamic_range"], config["disp_map_dynamic_range_fac"],
model_disp_max_abs_value)
pred_field_map, segmentation_image = map_align_model.inference(image, disp_polygon_map, checkpoints_dir)
aligned_disp_polygons = disp_polygons
# First remove polygons that are not fully inside the inner_image
padding = (spatial_shape[0] - pred_field_map.shape[0]) // 2
bounding_box = [padding, padding, spatial_shape[0] - padding, spatial_shape[1] - padding]
    # aligned_disp_polygons = polygon_utils.filter_polygons_in_bounding_box(aligned_disp_polygons, bounding_box)  # TODO: reimplement? But also filter out ori_gt_polygons for comparison
aligned_disp_polygons = polygon_utils.transform_polygons_to_bounding_box_space(aligned_disp_polygons, bounding_box)
# Then apply displacement field map to aligned_disp_polygons
aligned_disp_polygons = polygon_utils.apply_disp_map_to_polygons(pred_field_map, aligned_disp_polygons)
# Restore polygons to original image space
bounding_box = [-padding, -padding, spatial_shape[0] + padding, spatial_shape[1] + padding]
aligned_disp_polygons = polygon_utils.transform_polygons_to_bounding_box_space(aligned_disp_polygons, bounding_box)
# Add padding to segmentation_image
final_segmentation_image = np.zeros((spatial_shape[0], spatial_shape[1], segmentation_image.shape[2]))
final_segmentation_image[padding:-padding, padding:-padding, :] = segmentation_image
final_segmentation_image, aligned_disp_polygons = upsample_data(final_segmentation_image, ori_metadata, aligned_disp_polygons, scale_factor, config["reference_pixel_size"])
return aligned_disp_polygons, final_segmentation_image
def multires_inference(runs_dirpath, ori_image, ori_metadata, ori_disp_polygons, model_disp_max_abs_value, batch_size, ds_fac_list, run_name_list):
"""
Returns the last segmentation image that was computed (from the finest resolution)
    :param runs_dirpath:
    :param ori_image:
    :param ori_metadata:
    :param ori_disp_polygons:
    :param model_disp_max_abs_value:
    :param batch_size:
    :param ds_fac_list:
:param run_name_list:
:return:
"""
aligned_disp_polygons = ori_disp_polygons # init
segmentation_image = None
# Launch the resolution chain pipeline:
for index, (ds_fac, run_name) in enumerate(zip(ds_fac_list, run_name_list)):
print("# --- downsampling_factor: {} --- #".format(ds_fac))
try:
aligned_disp_polygons, segmentation_image = inference(runs_dirpath, ori_image, ori_metadata, aligned_disp_polygons, model_disp_max_abs_value, batch_size, ds_fac, run_name)
except ValueError as e:
print_utils.print_warning(str(e))
return aligned_disp_polygons, segmentation_image
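# Illustrative call sketch (added for clarity; the run names, factors and values below are assumptions that mirror
# the flags used elsewhere in this project, not guaranteed to match an existing trained run):
# aligned_polygons, seg_image = multires_inference(
#     "runs", image, metadata, noisy_polygons, model_disp_max_abs_value=4, batch_size=12,
#     ds_fac_list=[8, 4, 2], run_name_list=["ds_fac_8", "ds_fac_4", "ds_fac_2"])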
| 6,435 | 47.390977 | 186 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/4_compute_building_heights.py | import os.path
import sys
import math
import itertools
import numpy as np
import config
sys.path.append("../../utils")
import geo_utils
DATASET_DIR = os.path.join(config.PROJECT_DIR, "../../../data/stereo_dataset")
RAW_DIR = os.path.join(DATASET_DIR, "raw/leibnitz")
INPUT_DIR = "test/stereo_dataset_real_displacements.align.ds_fac_8.ds_fac_4.ds_fac_2"
VIEW_INFO_LIST = [
{
"image_name": "leibnitz_ortho_ref",
"image_filepath": os.path.join(RAW_DIR, "leibnitz_ortho_ref_RGB.tif"),
"shapefile_filepath": os.path.join(INPUT_DIR, "leibnitz_ref_rec.aligned_polygons.shp"),
# "shapefile_filepath": os.path.join(INPUT_DIR, "leibnitz_rec_ref.ori_polygons.shp"), # GT polygons
"angle": 76.66734850675575 * math.pi / 180, # Elevation
},
{
"image_name": "leibnitz_ortho_rec",
"image_filepath": os.path.join(RAW_DIR, "leibnitz_ortho_rec_RGB.tif"),
"shapefile_filepath": os.path.join(INPUT_DIR, "leibnitz_rec_ref.aligned_polygons.shp"),
# "shapefile_filepath": os.path.join(INPUT_DIR, "leibnitz_ref_rec.ori_polygons.shp"), # GT polygons
"angle": 69.62096370829768 * math.pi / 180, # Elevation
},
]
PIXELSIZE = 0.5 # 1 pixel is 0.5 meters
OUTPUT_BASE_DIRPATH = "3d_buildings/leibnitz"
def compute_heights(view_1, view_2, pixelsize):
tan_1 = math.tan(view_1["angle"])
tan_2 = math.tan(view_2["angle"])
tan_alpha = min(tan_1, tan_2)
tan_beta = max(tan_1, tan_2)
angle_height_coef = tan_alpha * tan_beta / (tan_beta - tan_alpha)
heights = []
for polygon_1, polygon_2 in zip(view_1["polygon_list"], view_2["polygon_list"]):
center_1 = np.mean(polygon_1, axis=0, keepdims=True)
center_2 = np.mean(polygon_2, axis=0, keepdims=True)
distance = np.sqrt(np.sum(np.square(center_1 - center_2), axis=1))[0]
height = distance * angle_height_coef * pixelsize
heights.append(height)
return heights
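# Added derivation note: in an orthophoto taken under elevation angle theta, a point at height h is shifted
# horizontally by h / tan(theta) with respect to its ground position. The offset between the two views is therefore
# d = h * (1/tan(alpha) - 1/tan(beta)) with tan(alpha) < tan(beta), which solves to
# h = d * tan(alpha) * tan(beta) / (tan(beta) - tan(alpha)),
# i.e. the angle_height_coef used above (d is measured in pixels, hence the final pixelsize factor).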
def main(view_info_list, pixelsize, output_base_dirpath):
print("# --- Loading shapefiles --- #")
view_list = []
for view_info in view_info_list:
polygon_list, properties_list = geo_utils.get_polygons_from_shapefile(view_info["image_filepath"],
view_info["shapefile_filepath"])
view = {
"polygon_list": polygon_list,
"properties_list": properties_list,
"angle": view_info["angle"],
}
view_list.append(view)
# Extract ground truth building heights
gt_heights = []
for properties in view_list[0]["properties_list"]:
gt_heights.append(properties["HEIGHT"])
gt_heights_array = np.array(gt_heights)
# Iterate over all possible pairs of views:
heights_list = []
view_pair_list = itertools.combinations(view_list, 2)
for view_pair in view_pair_list:
heights = compute_heights(view_pair[0], view_pair[1], pixelsize)
heights_list.append(heights)
# Average results from pairs
heights_list_array = np.array(heights_list)
pred_heights_array = np.mean(heights_list_array, axis=0)
# Correct pred heights:
    pred_heights_array = pred_heights_array / 4.39  # Factor found by using the ground truth polygons to compute the heights
polygon_list = view_list[0]["polygon_list"] # Take from the first view
# Save shapefile
output_shapefile_filepath = os.path.join(output_base_dirpath, view_info_list[0]["image_name"] + "_pred_heights.shp")
pred_properties_list = view_list[0]["properties_list"].copy() # First copy existing properties list
for i, pred_height in enumerate(pred_heights_array): # Then replace HEIGHT
pred_properties_list[i]["HEIGHT"] = pred_height
geo_utils.save_shapefile_from_polygons(view_list[0]["polygon_list"], view_info_list[0]["image_filepath"], output_shapefile_filepath, properties_list=pred_properties_list)
# Save for modeling buildings in Blender and measuring accuracy
scaled_polygon_list = [polygon * pixelsize for polygon in polygon_list]
np.save(os.path.join(output_base_dirpath, "polygons.npy"), scaled_polygon_list)
np.save(os.path.join(output_base_dirpath, "gt_heights.npy"), gt_heights_array)
np.save(os.path.join(output_base_dirpath, "pred_heights.npy"), pred_heights_array)
if __name__ == "__main__":
main(VIEW_INFO_LIST, PIXELSIZE, OUTPUT_BASE_DIRPATH)
| 4,526 | 40.154545 | 174 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/download_pretrained.py | import os.path
import urllib.request
import zipfile
ressource_filename_list = ["runs.igarss2019.zip"]
ressource_dirpath_url = "https://www-sop.inria.fr/members/Nicolas.Girard/downloads/mapalignment"
script_filepath = os.path.realpath(__file__)
zip_download_dirpath = os.path.join(os.path.dirname(script_filepath), "runs.zip")
download_dirpath = os.path.join(os.path.dirname(script_filepath), "runs")
for ressource_filename in ressource_filename_list:
ressource_url = os.path.join(ressource_dirpath_url, ressource_filename)
print("Downloading zip from {}, please wait... (approx. 406MB to download)".format(ressource_url))
urllib.request.urlretrieve(ressource_url, zip_download_dirpath)
print("Extracting zip...")
zip_ref = zipfile.ZipFile(zip_download_dirpath, 'r')
    os.makedirs(download_dirpath, exist_ok=True)
zip_ref.extractall(download_dirpath)
zip_ref.close()
os.remove(zip_download_dirpath)
| 953 | 35.692308 | 102 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/1_train.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tensorflow as tf
import os
import model
sys.path.append(os.path.join("../dataset_utils"))
import dataset_multires
sys.path.append("../../utils")
import python_utils
import run_utils
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('config', "config",
"Name of the config file, excluding the .json file extension")
flags.DEFINE_boolean('new_run', False,
"Train from scratch (when True) or train from the last checkpoint (when False)")
flags.DEFINE_string('init_run_name', None,
"This is the run_name to initialize the weights from. "
"If None, weights will be initialized randomly."
"This is a single word, without the timestamp.")
flags.DEFINE_string('run_name', None,
"Continue training from run_name. This is a single word, without the timestamp.")
# If not specified, the last run is used (unless new_run is True or no runs are in the runs directory).
# If new_run is True, creates the new run with name equal run_name.
flags.DEFINE_integer('batch_size', 8, "Batch size. Generally set as large as the VRAM can handle.")
flags.DEFINE_integer('ds_fac', 8, "Downsampling factor. Choose from which resolution sub-dataset to train on.")
# Some examples:
# On Quadro M2200, 4GB VRAM: python 1_train.py --new_run --run_name=ds_fac_8 --batch_size 4 --ds_fac 8
# On Quadro M2200, 4GB VRAM: python 1_train.py --new_run --init_run_name=ds_fac_8 --run_name=ds_fac_4_with_init --batch_size 4 --ds_fac 4
# On Quadro M2200, 4GB VRAM: python 1_train.py --new_run --batch_size 4 --ds_fac 2
# On GTX 1080 Ti, 11GB VRAM: python 1_train.py --new_run --run_name=ds_fac_8_no_seg --batch_size 32 --ds_fac 8
# On GTX 1080 Ti, 11GB VRAM: python 1_train.py --new_run --run_name=ds_fac_4_no_seg --batch_size 32 --ds_fac 4
# On GTX 1080 Ti, 11GB VRAM: python 1_train.py --new_run --init_run_name=ds_fac_4_double --run_name=ds_fac_8_double --batch_size 32 --ds_fac 8
# On GTX 1080 Ti, 11GB VRAM: python 1_train.py --new_run --init_run_name=ds_fac_4_double --run_name=ds_fac_2_double --batch_size 32 --ds_fac 2
# On GTX 1080 Ti, 11GB VRAM: python 1_train.py --new_run --init_run_name=ds_fac_1_double --run_name=ds_fac_1_double_seg --batch_size 32 --ds_fac 1
# On GTX 1080 Ti, 11GB VRAM: python 1_train.py --run_name=ds_fac_8_double --batch_size 32 --ds_fac 8
# On GTX 1080 Ti, 11GB VRAM: python 1_train.py --run_name=ds_fac_2_double --batch_size 32 --ds_fac 2
def train(config, tfrecords_dirpath_list, init_run_dirpath, run_dirpath, batch_size, ds_fac_list, ds_repeat_list):
# setup init checkpoints directory path if one is specified:
if init_run_dirpath is not None:
_, init_checkpoints_dirpath = run_utils.setup_run_subdirs(init_run_dirpath, config["logs_dirname"],
config["checkpoints_dirname"])
else:
init_checkpoints_dirpath = None
# setup stage run dirs
# create run subdirectories if they do not exist
logs_dirpath, checkpoints_dirpath = run_utils.setup_run_subdirs(run_dirpath, config["logs_dirname"],
config["checkpoints_dirname"])
# compute output_res
output_res = model.MapAlignModel.get_output_res(config["input_res"], config["pool_count"])
print("output_res: {}".format(output_res))
# instantiate model object (resets the default graph)
map_align_model = model.MapAlignModel(config["model_name"], config["input_res"],
config["add_image_input"], config["image_channel_count"],
config["image_feature_base_count"],
config["add_poly_map_input"], config["poly_map_channel_count"],
config["poly_map_feature_base_count"],
config["common_feature_base_count"], config["pool_count"],
config["add_disp_output"], config["disp_channel_count"],
config["add_seg_output"], config["seg_channel_count"],
output_res,
batch_size,
config["loss_params"],
config["level_loss_coefs_params"],
config["learning_rate_params"],
config["weight_decay"],
config["image_dynamic_range"], config["disp_map_dynamic_range_fac"],
config["disp_max_abs_value"])
# train dataset
train_dataset_filename_list = dataset_multires.create_dataset_filename_list(tfrecords_dirpath_list,
config["tfrecord_filename_format"],
ds_fac_list,
dataset="train",
resolution_file_repeats=ds_repeat_list)
train_dataset_tensors = dataset_multires.read_and_decode(
train_dataset_filename_list,
output_res,
config["input_res"],
batch_size,
config["image_dynamic_range"],
disp_map_dynamic_range_fac=config["disp_map_dynamic_range_fac"],
keep_poly_prob=config["keep_poly_prob"],
data_aug=config["data_aug"],
train=True)
if config["perform_validation_step"]:
# val dataset
val_dataset_filename_list = dataset_multires.create_dataset_filename_list(tfrecords_dirpath_list,
config["tfrecord_filename_format"],
ds_fac_list,
dataset="val",
resolution_file_repeats=ds_repeat_list)
val_dataset_tensors = dataset_multires.read_and_decode(
val_dataset_filename_list,
output_res,
config["input_res"],
batch_size,
config["image_dynamic_range"],
disp_map_dynamic_range_fac=config["disp_map_dynamic_range_fac"],
keep_poly_prob=config["keep_poly_prob"],
data_aug=False,
train=False)
else:
val_dataset_tensors = None
# launch training
map_align_model.optimize(train_dataset_tensors, val_dataset_tensors,
config["max_iter"], config["dropout_keep_prob"],
logs_dirpath, config["train_summary_step"], config["val_summary_step"],
checkpoints_dirpath, config["checkpoint_step"],
init_checkpoints_dirpath=init_checkpoints_dirpath,
plot_results=config["plot_results"])
def main(_):
working_dir = os.path.dirname(os.path.abspath(__file__))
# print FLAGS
print("#--- FLAGS: ---#")
print("config: {}".format(FLAGS.config))
print("new_run: {}".format(FLAGS.new_run))
print("init_run_name: {}".format(FLAGS.init_run_name))
print("run_name: {}".format(FLAGS.run_name))
print("batch_size: {}".format(FLAGS.batch_size))
print("ds_fac: {}".format(FLAGS.ds_fac))
# load config file
config = run_utils.load_config(FLAGS.config)
# Check config setting coherences
assert len(config["level_loss_coefs_params"]) == config["pool_count"], \
"level_loss_coefs_params ({} elements) must have model_res_levels ({}) elements".format(
len(config["level_loss_coefs_params"]), config["pool_count"])
# Find data_dir
data_dir = python_utils.choose_first_existing_path(config["data_dir_candidates"])
if data_dir is None:
print("ERROR: Data directory not found!")
exit()
else:
print("Using data from {}".format(data_dir))
# Setup dataset dirpaths
tfrecords_dirpath_list = [os.path.join(data_dir, tfrecords_dirpath) for tfrecords_dirpath in
config["tfrecords_partial_dirpath_list"]]
# Overwrite config ds_fac if FLAGS specify them
if FLAGS.ds_fac is not None:
ds_fac_list = [FLAGS.ds_fac]
ds_repeat_list = [1]
else:
ds_fac_list = config["ds_fac_list"]
ds_repeat_list = config["ds_repeat_list"]
# setup init run directory of one is specified:
if FLAGS.init_run_name is not None:
init_run_dirpath = run_utils.setup_run_dir(config["runs_dirname"], FLAGS.init_run_name)
else:
init_run_dirpath = None
# setup run directory:
runs_dir = os.path.join(working_dir, config["runs_dirname"])
current_run_dirpath = run_utils.setup_run_dir(runs_dir, FLAGS.run_name, FLAGS.new_run)
# save config in logs directory
run_utils.save_config(config, current_run_dirpath)
# save FLAGS
FLAGS_filepath = os.path.join(current_run_dirpath, "FLAGS.json")
python_utils.save_json(FLAGS_filepath, {
"run_name": FLAGS.run_name,
"new_run": FLAGS.new_run,
"batch_size": FLAGS.batch_size,
"ds_fac": FLAGS.ds_fac,
})
train(config, tfrecords_dirpath_list, init_run_dirpath, current_run_dirpath, FLAGS.batch_size, ds_fac_list,
ds_repeat_list)
if __name__ == '__main__':
tf.app.run(main=main)
| 9,943 | 44.2 | 146 | py |
mapalignment | mapalignment-master/projects/utils/tf_utils.py | import tensorflow as tf
from tensorflow.python.framework.ops import get_gradient_function
import math
import numpy as np
def get_tf_version():
return tf.__version__
def bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def compute_current_adam_lr(optimizer):
# print(get_tf_version())
# a0, bb1, bb2 = optimizer._lr, optimizer._beta1_power, optimizer._beta2_power
# at = a0 * (1 - bb2) ** 0.5 / (1 - bb1)
# return at
return optimizer._lr # TODO: verify if this works
def count_number_trainable_params(trainable_variables=None):
"""
Counts the number of trainable variables.
"""
if trainable_variables is None:
trainable_variables = tf.trainable_variables()
tot_nb_params = 0
for trainable_variable in trainable_variables:
shape = trainable_variable.get_shape() # e.g [D,F] or [W,H,C]
current_nb_params = get_nb_params_shape(shape)
tot_nb_params = tot_nb_params + current_nb_params
return tot_nb_params
def get_nb_params_shape(shape):
"""
Computes the total number of params for a given shape.
Works for any number of shapes etc [D,F] or [W,H,C] computes D*F and W*H*C.
"""
nb_params = 1
for dim in shape:
nb_params = nb_params * int(dim)
return nb_params
def conv2d(x, W, stride=1, padding="SAME"):
"""conv2d returns a 2d convolution layer."""
return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
def complete_conv2d(input_tensor, output_channels, kernel_size, stride=1, padding="SAME", activation=tf.nn.relu, bias_init_value=0.025,
std_factor=1, weight_decay=None, summary=False):
input_channels = input_tensor.get_shape().as_list()[-1]
output_channels = int(output_channels)
with tf.name_scope('W'):
w_conv = weight_variable([kernel_size[0], kernel_size[1], input_channels, output_channels], std_factor=std_factor, wd=weight_decay)
if summary:
variable_summaries(w_conv)
with tf.name_scope('bias'):
b_conv = bias_variable([output_channels], init_value=bias_init_value)
if summary:
variable_summaries(b_conv)
z_conv = conv2d(input_tensor, w_conv, stride=stride, padding=padding) + b_conv
if summary:
tf.summary.histogram('pre_activations', z_conv)
if activation is not None:
h_conv = activation(z_conv)
else:
h_conv = z_conv
if summary:
tf.summary.histogram('activations', h_conv)
return h_conv
def conv2d_transpose(x, W, output_shape, stride=1, padding="SAME"):
"""conv2d_transpose returns a 2d transpose convolution layer."""
return tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding=padding)
def complete_conv2d_transpose(input_tensor, output_channels, output_size, kernel_size, stride=1, padding="SAME", activation=tf.nn.relu,
bias_init_value=0.025, std_factor=1, weight_decay=None, summary=False):
batch_size = input_tensor.get_shape().as_list()[0]
input_channels = input_tensor.get_shape().as_list()[-1]
output_channels = int(output_channels)
with tf.name_scope('W'):
w_conv = weight_variable([kernel_size[0], kernel_size[1], output_channels, input_channels], std_factor=std_factor, wd=weight_decay)
if summary:
variable_summaries(w_conv)
with tf.name_scope('bias'):
b_conv = bias_variable([output_channels], init_value=bias_init_value)
if summary:
variable_summaries(b_conv)
z_conv = conv2d_transpose(input_tensor, w_conv, [batch_size, output_size[0], output_size[1], output_channels], stride=stride, padding=padding) + b_conv
if summary:
tf.summary.histogram('pre_activations', z_conv)
h_conv = activation(z_conv)
if summary:
tf.summary.histogram('activations', h_conv)
return h_conv
def complete_fc(input_tensor, output_channels, bias_init_value=0.025, weight_decay=None, activation=tf.nn.relu, summary=False):
batch_size = input_tensor.get_shape().as_list()[0]
net = tf.reshape(input_tensor, (batch_size, -1))
input_channels = net.get_shape().as_list()[-1]
with tf.name_scope('W'):
w_fc = weight_variable([input_channels, output_channels], wd=weight_decay)
if summary:
variable_summaries(w_fc)
with tf.name_scope('bias'):
b_fc = bias_variable([output_channels], init_value=bias_init_value)
if summary:
variable_summaries(b_fc)
z_fc = tf.matmul(net, w_fc) + b_fc
if summary:
tf.summary.histogram('pre_activations', z_fc)
h_fc = activation(z_fc)
if summary:
tf.summary.histogram('activations', h_fc)
return h_fc
def max_pool_2x2(x):
"""max_pool_2x2 downsamples a feature map by 2X."""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape, std_factor=1, wd=None):
"""weight_variable generates a weight variable of a given shape. Adds weight decay if specified"""
# Initialize using Xavier initializer
fan_in = 100
fan_out = 100
if len(shape) == 4:
fan_in = shape[0] * shape[1] * shape[2]
fan_out = shape[3]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
print("WARNING: This shape format is not handled! len(shape) = {}".format(len(shape)))
stddev = std_factor * math.sqrt(2 / (fan_in + fan_out))
initial = tf.truncated_normal(shape, stddev=stddev)
    var = tf.Variable(initial)
    if wd is not None:
        # Penalize the variable itself (not the random initial tensor) so that the decay actually regularizes training
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
def bias_variable(shape, init_value=0.025):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(init_value, shape=shape)
return tf.Variable(initial)
def parametric_relu(_x):
alphas = tf.get_variable('alpha', _x.get_shape()[-1],
initializer=tf.constant_initializer(0.0),
dtype=tf.float32)
pos = tf.nn.relu(_x)
neg = alphas * (_x - abs(_x)) * 0.5
return pos + neg
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
# with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
# with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def make_depthwise_kernel(a, in_channels):
"""Transform a 2D array into a convolution kernel"""
a = np.asarray(a)
a = a.reshape(list(a.shape) + [1, 1])
a = tf.constant(a, dtype=tf.float32)
a = tf.tile(a, [1, 1, in_channels, 1])
return a
def dilate(image, filter_size=2):
rank = len(image.get_shape())
if rank == 3:
image = tf.expand_dims(image, axis=0) # Add batch dim
depth = image.get_shape().as_list()[-1]
    dilation_filter = np.zeros((filter_size, filter_size, depth))  # tf.nn.dilation2d takes the max of (input + filter) over each window, so an all-zero filter gives a plain grayscale dilation (sliding max)
    output = tf.nn.dilation2d(image, dilation_filter, strides=[1, 1, 1, 1], rates=[1, 1, 1, 1], padding="SAME", name='dilation2d')
if rank == 3:
return output[0]
else:
return output
# rank = len(input.get_shape())
# channels = input.get_shape().as_list()[-1]
# kernel_size = 2*radius + 1
# kernel_array = np.ones((kernel_size, kernel_size)) / (kernel_size*kernel_size)
# kernel = make_depthwise_kernel(kernel_array, channels)
# if rank == 3:
# input = tf.expand_dims(input, axis=0) # Add batch dim
# output = tf.nn.depthwise_conv2d(input, kernel, [1, 1, 1, 1], padding='SAME')
# if rank == 3:
# return output[0]
# else:
# return output
def gaussian_blur(image, filter_size, mean, std):
def make_gaussian_kernel(size: int,
mean: float,
std: float,
):
"""Makes 2D gaussian Kernel for convolution."""
mean = float(mean)
        std = float(std)
d = tf.distributions.Normal(mean, std)
vals = d.prob(tf.range(start=-size, limit=size + 1, dtype=tf.float32))
gauss_kernel = tf.einsum('i,j->ij',
vals,
vals)
return gauss_kernel / tf.reduce_sum(gauss_kernel)
gauss_kernel = make_gaussian_kernel(filter_size, mean, std)
gauss_kernel = gauss_kernel[:, :, tf.newaxis, tf.newaxis]
image_blurred = tf.nn.conv2d(image, gauss_kernel, strides=[1, 1, 1, 1], padding="SAME")
return image_blurred
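# Usage sketch (added note): the kernel built above has shape [2*filter_size+1, 2*filter_size+1, 1, 1], so `image`
# is expected to be a 4-D single-channel tensor [batch, height, width, 1]. Example values are assumptions:
# blurred = gaussian_blur(tf.placeholder(tf.float32, [1, 64, 64, 1]), filter_size=3, mean=0.0, std=1.0)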
def create_array_to_feed_placeholder(placeholder):
shape = placeholder.get_shape().as_list()
shape_removed_none = []
for dim in shape:
if dim is not None:
shape_removed_none.append(dim)
else:
shape_removed_none.append(0)
return np.empty(shape_removed_none)
| 9,356 | 35.694118 | 155 | py |
mapalignment | mapalignment-master/projects/utils/viz_utils.py | import sys
import numpy as np
sys.path.append("../../utils")
import polygon_utils
import skimage.io
import cv2
def save_plot_image_polygon(filepath, image, polygons):
spatial_shape = image.shape[:2]
polygons_map = polygon_utils.draw_polygon_map(polygons, spatial_shape, fill=False, edges=True,
vertices=False, line_width=1)
output_image = image[:, :, :3] # Keep first 3 channels
output_image = output_image.astype(np.float64)
output_image[np.where(0 < polygons_map[:, :, 0])] = np.array([0, 0, 255])
# output_image = np.clip(output_image, 0, 255)
output_image = output_image.astype(np.uint8)
skimage.io.imsave(filepath, output_image)
def save_plot_segmentation_image(filepath, segmentation_image):
output_image = np.zeros((segmentation_image.shape[0], segmentation_image.shape[1], 4))
output_image[:, :, :3] = segmentation_image[:, :, 1:4] # Remove background channel
output_image[:, :, 3] = np.sum(segmentation_image[:, :, 1:4], axis=-1) # Add alpha
output_image = output_image * 255
output_image = np.clip(output_image, 0, 255)
output_image = output_image.astype(np.uint8)
skimage.io.imsave(filepath, output_image)
def flow_to_image(flow):
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv = np.zeros((flow.shape[0], flow.shape[1], 3))
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 1] = 255
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
hsv = hsv.astype(np.uint8)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return rgb
| 1,610 | 34.021739 | 98 | py |
mapalignment | mapalignment-master/projects/utils/dataset_utils.py | import os
import tensorflow as tf
class TFRecordShardWriter:
def __init__(self, filepath_format, max_records_per_shard):
self.filepath_format = filepath_format
self.max_records_per_shard = max_records_per_shard
self.current_shard_record_count = 0 # To know when to switch to a new file
self.current_shard_count = 0 # To know how to name the record file
self.writer = None
self.create_new_shard_writer()
def create_new_shard_writer(self):
filename = self.filepath_format.format(self.current_shard_count)
os.makedirs(os.path.dirname(filename), exist_ok=True)
self.writer = tf.python_io.TFRecordWriter(filename)
self.current_shard_count += 1
def write(self, serialized_example):
self.current_shard_record_count += 1
if self.max_records_per_shard < self.current_shard_record_count:
self.create_new_shard_writer()
self.current_shard_record_count = 1
self.writer.write(serialized_example)
def close(self):
self.writer.close()
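# Usage sketch (added note; the filename pattern and shard size are example assumptions):
# writer = TFRecordShardWriter("tfrecords/shard_{:06d}.tfrecord", max_records_per_shard=1000)
# for example in examples:  # each `example` being a tf.train.Example
#     writer.write(example.SerializeToString())
# writer.close()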
| 1,079 | 36.241379 | 83 | py |
mapalignment | mapalignment-master/projects/utils/python_utils.py |
import os
import errno
import json
from jsmin import jsmin
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
def choose_first_existing_path(path_list):
for path in path_list:
if os.path.exists(os.path.expanduser(path)):
return path
return None
def get_display_availability():
return "DISPLAY" in os.environ
def get_filepaths(dir_path, endswith_str="", startswith_str=""):
if os.path.isdir(dir_path):
image_filepaths = []
for path, dnames, fnames in os.walk(dir_path):
fnames = sorted(fnames)
image_filepaths.extend([os.path.join(path, x) for x in fnames if x.endswith(endswith_str) and x.startswith(startswith_str)])
return image_filepaths
else:
raise NotADirectoryError(errno.ENOENT, os.strerror(errno.ENOENT), dir_path)
def get_dir_list_filepaths(dir_path_list, endswith_str="", startswith_str=""):
image_filepaths = []
for dir_path in dir_path_list:
image_filepaths.extend(get_filepaths(dir_path, endswith_str=endswith_str, startswith_str=startswith_str))
return image_filepaths
def save_json(filepath, data):
dirpath = os.path.dirname(filepath)
os.makedirs(dirpath, exist_ok=True)
with open(filepath, 'w') as outfile:
json.dump(data, outfile)
return True
def load_json(filepath):
try:
with open(filepath, 'r') as f:
minified = jsmin(f.read())
data = json.loads(minified)
return data
except FileNotFoundError:
return False
def wipe_dir(dirpath):
filepaths = get_filepaths(dirpath)
for filepath in filepaths:
os.remove(filepath)
def split_list_into_chunks(l, n, pad=False):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
if pad:
chunk = l[i:i + n]
if len(chunk) < n:
chunk.extend([chunk[-1]]*(n - len(chunk)))
yield chunk
else:
yield l[i:i + n]
def params_to_str(params):
def to_str(value):
if type(value) == float and value == int(value):
return str(int(value))
return str(value)
return "_".join(["{}_{}".format(key, to_str(params[key])) for key in sorted(params.keys())])
def main():
l = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
batches = split_list_into_chunks(l, 4, pad=True)
for batch in batches:
print(batch)
if __name__ == '__main__':
main()
| 2,600 | 24.5 | 136 | py |
mapalignment | mapalignment-master/projects/utils/image_utils.py | from io import BytesIO
import math
import numpy as np
from PIL import Image
import skimage.draw
import python_utils
CV2 = False
if python_utils.module_exists("cv2"):
import cv2
CV2 = True
if python_utils.module_exists("matplotlib.pyplot"):
import matplotlib.pyplot as plt
def get_image_size(filepath):
im = Image.open(filepath)
return im.size
def load_image(image_filepath):
image = Image.open(image_filepath)
image.load()
image_array = np.array(image, dtype=np.uint8)
image.close()
return image_array
def padded_boundingbox(boundingbox, padding):
boundingbox_new = np.empty_like(boundingbox)
boundingbox_new[0:2] = boundingbox[0:2] + padding
boundingbox_new[2:4] = boundingbox[2:4] - padding
return boundingbox_new
def center_bbox(spatial_shape, output_shape):
"""
Return a bbox centered in spatial_shape with size output_shape
:param spatial_shape:
:param output_shape:
:return:
"""
center = (spatial_shape[0] / 2, spatial_shape[1] / 2)
half_output_shape = (output_shape[0] / 2, output_shape[1] / 2)
bbox = [center[0] - half_output_shape[0], center[1] - half_output_shape[1], center[0] + half_output_shape[0], center[1] + half_output_shape[1]]
bbox = bbox_to_int(bbox)
return bbox
def bbox_add_margin(bbox, margin):
bbox_new = bbox.copy()
bbox_new[0:2] -= margin
bbox_new[2:4] += margin
return bbox_new
def bbox_to_int(bbox):
bbox_new = [
int(np.floor(bbox[0])),
int(np.floor(bbox[1])),
int(np.ceil(bbox[2])),
int(np.ceil(bbox[3])),
]
return bbox_new
def draw_line_aa_in_patch(edge, patch_bounds):
rr, cc, prob = skimage.draw.line_aa(edge[0][0], edge[0][1], edge[1][0], edge[1][1])
keep_mask = (patch_bounds[0] <= rr) & (rr < patch_bounds[2]) \
& (patch_bounds[1] <= cc) & (cc < patch_bounds[3])
rr = rr[keep_mask]
cc = cc[keep_mask]
prob = prob[keep_mask]
return rr, cc, prob
def convert_array_to_jpg_bytes(image_array, mode=None):
img = Image.fromarray(image_array, mode=mode)
output = BytesIO()
img.save(output, format="JPEG", quality=90)
contents = output.getvalue()
output.close()
return contents
def displacement_map_to_transformation_maps(disp_field_map):
disp_field_map = disp_field_map.astype(np.float32)
i = np.arange(disp_field_map.shape[0], dtype=np.float32)
j = np.arange(disp_field_map.shape[1], dtype=np.float32)
iv, jv = np.meshgrid(i, j, indexing="ij")
reverse_map_i = iv + disp_field_map[:, :, 1]
reverse_map_j = jv + disp_field_map[:, :, 0]
return reverse_map_i, reverse_map_j
if CV2:
def apply_displacement_field_to_image(image, disp_field_map):
trans_map_i, trans_map_j = displacement_map_to_transformation_maps(disp_field_map)
misaligned_image = cv2.remap(image, trans_map_j, trans_map_i, cv2.INTER_CUBIC)
return misaligned_image
def apply_displacement_fields_to_image(image, disp_field_maps):
disp_field_map_count = disp_field_maps.shape[0]
misaligned_image_list = []
for i in range(disp_field_map_count):
misaligned_image = apply_displacement_field_to_image(image, disp_field_maps[i, :, :, :])
misaligned_image_list.append(misaligned_image)
return misaligned_image_list
else:
    def apply_displacement_field_to_image(image, disp_field_map):
        print("cv2 is not available, the apply_displacement_field_to_image(image, disp_field_map) function cannot work!")
def apply_displacement_fields_to_image(image, disp_field_maps):
print("cv2 is not available, the apply_displacement_fields_to_image(image, disp_field_maps) function cannot work!")
def get_axis_patch_count(length, stride, patch_res):
total_double_padding = patch_res - stride
patch_count = max(1, int(math.ceil((length - total_double_padding) / stride)))
return patch_count
def compute_patch_boundingboxes(image_size, stride, patch_res):
im_rows = image_size[0]
im_cols = image_size[1]
row_patch_count = get_axis_patch_count(im_rows, stride, patch_res)
col_patch_count = get_axis_patch_count(im_cols, stride, patch_res)
patch_boundingboxes = []
for i in range(0, row_patch_count):
if i < row_patch_count - 1:
row_slice_begin = i * stride
row_slice_end = row_slice_begin + patch_res
else:
row_slice_end = im_rows
row_slice_begin = row_slice_end - patch_res
for j in range(0, col_patch_count):
if j < col_patch_count - 1:
col_slice_begin = j*stride
col_slice_end = col_slice_begin + patch_res
else:
col_slice_end = im_cols
col_slice_begin = col_slice_end - patch_res
patch_boundingbox = np.array([row_slice_begin, col_slice_begin, row_slice_end, col_slice_end], dtype=np.int)
assert row_slice_end - row_slice_begin == col_slice_end - col_slice_begin == patch_res, "ERROR: patch does not have the requested shape"
patch_boundingboxes.append(patch_boundingbox)
return patch_boundingboxes
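# Illustrative example (added note, example numbers): with image_size=(100, 150), stride=50 and patch_res=64,
# get_axis_patch_count() gives 2 patches along the rows and 3 along the columns, so this function returns
# 6 bounding boxes [row_begin, col_begin, row_end, col_end]; the last patch of each axis is shifted back so
# that it still covers exactly patch_res pixels.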
def clip_boundingbox(boundingbox, clip_list):
assert len(boundingbox) == len(clip_list), "len(boundingbox) should be equal to len(clip_values)"
clipped_boundingbox = []
for bb_value, clip in zip(boundingbox[:2], clip_list[:2]):
clipped_value = max(clip, bb_value)
clipped_boundingbox.append(clipped_value)
for bb_value, clip in zip(boundingbox[2:], clip_list[2:]):
clipped_value = min(clip, bb_value)
clipped_boundingbox.append(clipped_value)
return clipped_boundingbox
def crop_or_pad_image_with_boundingbox(image, patch_boundingbox):
im_rows = image.shape[0]
im_cols = image.shape[1]
row_padding_before = max(0, - patch_boundingbox[0])
col_padding_before = max(0, - patch_boundingbox[1])
row_padding_after = max(0, patch_boundingbox[2] - im_rows)
col_padding_after = max(0, patch_boundingbox[3] - im_cols)
# Center padding:
row_padding = row_padding_before + row_padding_after
col_padding = col_padding_before + col_padding_after
row_padding_before = row_padding // 2
col_padding_before = col_padding // 2
row_padding_after = row_padding - row_padding // 2
col_padding_after = col_padding - col_padding // 2
clipped_patch_boundingbox = clip_boundingbox(patch_boundingbox, [0, 0, im_rows, im_cols])
if len(image.shape) == 2:
patch = image[clipped_patch_boundingbox[0]:clipped_patch_boundingbox[2], clipped_patch_boundingbox[1]:clipped_patch_boundingbox[3]]
patch = np.pad(patch, [(row_padding_before, row_padding_after), (col_padding_before, col_padding_after)], mode="constant")
elif len(image.shape) == 3:
patch = image[clipped_patch_boundingbox[0]:clipped_patch_boundingbox[2], clipped_patch_boundingbox[1]:clipped_patch_boundingbox[3], :]
patch = np.pad(patch, [(row_padding_before, row_padding_after), (col_padding_before, col_padding_after), (0, 0)], mode="constant")
else:
print("Image input does not have the right shape/")
patch = None
return patch
def make_grid(images, padding=2, pad_value=0):
nmaps = images.shape[0]
ymaps = int(math.floor(math.sqrt(nmaps)))
xmaps = nmaps // ymaps
height, width = int(images.shape[1] + padding), int(images.shape[2] + padding)
grid = np.zeros((height * ymaps + padding, width * xmaps + padding, images.shape[3])) + pad_value
k = 0
for y in range(ymaps):
for x in range(xmaps):
if k >= nmaps:
break
grid[y * height + padding:(y+1) * height, x * width + padding:(x+1) * width, :] = images[k]
k = k + 1
return grid
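# Usage sketch (added note; shapes are example assumptions):
# images = np.random.rand(9, 32, 32, 3)
# grid = make_grid(images, padding=2, pad_value=0)  # -> (104, 104, 3) array laying the 9 maps out on a 3x3 grid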
if __name__ == "__main__":
im_rows = 5
im_cols = 10
stride = 1
patch_res = 15
image = np.random.randint(0, 256, size=(im_rows, im_cols, 3), dtype=np.uint8)
image = Image.fromarray(image)
image = np.array(image)
plt.ion()
plt.figure(1)
plt.imshow(image)
plt.show()
# Cut patches
patch_boundingboxes = compute_patch_boundingboxes(image.shape[0:2], stride, patch_res)
plt.figure(2)
for patch_boundingbox in patch_boundingboxes:
patch = crop_or_pad_image_with_boundingbox(image, patch_boundingbox)
plt.imshow(patch)
plt.show()
input("Press <Enter> to finish...")
| 8,487 | 34.514644 | 148 | py |
mapalignment | mapalignment-master/projects/utils/print_utils.py | class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
DEBUG = '\033[31;40m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def print_info(string):
print(bcolors.OKBLUE + string + bcolors.ENDC)
def print_success(string):
print(bcolors.OKGREEN + string + bcolors.ENDC)
def print_failure(string):
print(bcolors.FAIL + string + bcolors.ENDC)
def print_error(string):
print_failure(string)
def print_warning(string):
print(bcolors.WARNING + string + bcolors.ENDC)
def print_debug(string):
print(bcolors.DEBUG + string + bcolors.ENDC)
def print_format_table():
"""
prints table of formatted text format options
"""
for style in range(8):
for fg in range(30, 38):
s1 = ''
for bg in range(40, 48):
format = ';'.join([str(style), str(fg), str(bg)])
s1 += '\x1b[%sm %s \x1b[0m' % (format, format)
print(s1)
print('\n')
def main():
print_format_table()
print_info("Info")
print_success("Success")
print_failure("Failure")
print_error("ERROR")
print_warning("WARNING")
print_debug("Debug")
if __name__ == '__main__':
main()
| 1,293 | 19.21875 | 65 | py |
mapalignment | mapalignment-master/projects/utils/polygon_utils.py | import math
import random
import numpy as np
import scipy.spatial
from PIL import Image, ImageDraw, ImageFilter
import skimage
import python_utils
if python_utils.module_exists("skimage.measure"):
from skimage.measure import approximate_polygon
if python_utils.module_exists("shapely"):
from shapely import geometry
def is_polygon_clockwise(polygon):
rolled_polygon = np.roll(polygon, shift=1, axis=0)
double_signed_area = np.sum((rolled_polygon[:, 0] - polygon[:, 0]) * (rolled_polygon[:, 1] + polygon[:, 1]))
if 0 < double_signed_area:
return True
else:
return False
def orient_polygon(polygon, orientation="CW"):
poly_is_orientated_cw = is_polygon_clockwise(polygon)
if (poly_is_orientated_cw and orientation == "CCW") or (not poly_is_orientated_cw and orientation == "CW"):
return np.flip(polygon, axis=0)
else:
return polygon
def orient_polygons(polygons, orientation="CW"):
return [orient_polygon(polygon, orientation=orientation) for polygon in polygons]
def raster_to_polygon(image, vertex_count):
contours = skimage.measure.find_contours(image, 0.5)
contour = np.empty_like(contours[0])
contour[:, 0] = contours[0][:, 1]
contour[:, 1] = contours[0][:, 0]
# Simplify until vertex_count
tolerance = 0.1
tolerance_step = 0.1
simplified_contour = contour
while 1 + vertex_count < len(simplified_contour):
simplified_contour = approximate_polygon(contour, tolerance=tolerance)
tolerance += tolerance_step
simplified_contour = simplified_contour[:-1]
# plt.imshow(image, cmap="gray")
# plot_polygon(simplified_contour, draw_labels=False)
# plt.show()
return simplified_contour
def l2diffs(polygon1, polygon2):
"""
Computes vertex-wise L2 difference between the two polygons.
As the two polygons may not have the same starting vertex,
all shifts are considred and the shift resulting in the minimum mean L2 difference is chosen
:param polygon1:
:param polygon2:
:return:
"""
# Make polygons of equal length
if len(polygon1) != len(polygon2):
while len(polygon1) < len(polygon2):
polygon1 = np.append(polygon1, [polygon1[-1, :]], axis=0)
while len(polygon2) < len(polygon1):
polygon2 = np.append(polygon2, [polygon2[-1, :]], axis=0)
vertex_count = len(polygon1)
def naive_l2diffs(polygon1, polygon2):
        naive_l2diffs_result = np.sqrt(np.sum(np.power(polygon1 - polygon2, 2), axis=1))  # vertex-wise Euclidean (L2) distance
return naive_l2diffs_result
min_l2_diffs = naive_l2diffs(polygon1, polygon2)
min_mean_l2_diffs = np.mean(min_l2_diffs, axis=0)
for i in range(1, vertex_count):
current_naive_l2diffs = naive_l2diffs(np.roll(polygon1, shift=i, axis=0), polygon2)
current_naive_mean_l2diffs = np.mean(current_naive_l2diffs, axis=0)
if current_naive_mean_l2diffs < min_mean_l2_diffs:
min_l2_diffs = current_naive_l2diffs
min_mean_l2_diffs = current_naive_mean_l2diffs
return min_l2_diffs
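# Illustrative example (added note): the same square listed from a different starting vertex yields zero
# differences, since all cyclic shifts are tried:
# >>> square = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=np.float64)
# >>> l2diffs(square, np.roll(square, shift=1, axis=0))
# array([0., 0., 0., 0.])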
def check_intersection_with_polygon(input_polygon, target_polygon):
poly1 = geometry.Polygon(input_polygon).buffer(0)
poly2 = geometry.Polygon(target_polygon).buffer(0)
intersection_poly = poly1.intersection(poly2)
intersection_area = intersection_poly.area
is_intersection = 0 < intersection_area
return is_intersection
def check_intersection_with_polygons(input_polygon, target_polygons):
"""
Returns True if there is an intersection with at least one polygon in target_polygons
:param input_polygon:
:param target_polygons:
:return:
"""
for target_polygon in target_polygons:
if check_intersection_with_polygon(input_polygon, target_polygon):
return True
return False
def polygon_area(polygon):
poly = geometry.Polygon(polygon).buffer(0)
return poly.area
def polygon_union(polygon1, polygon2):
poly1 = geometry.Polygon(polygon1).buffer(0)
poly2 = geometry.Polygon(polygon2).buffer(0)
union_poly = poly1.union(poly2)
return np.array(union_poly.exterior.coords)
def polygon_iou(polygon1, polygon2):
poly1 = geometry.Polygon(polygon1).buffer(0)
poly2 = geometry.Polygon(polygon2).buffer(0)
intersection_poly = poly1.intersection(poly2)
union_poly = poly1.union(poly2)
intersection_area = intersection_poly.area
union_area = union_poly.area
if union_area:
iou = intersection_area / union_area
else:
iou = 0
return iou
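# Illustrative example (added note): two unit squares overlapping on a quarter of their area have
# intersection 0.25 and union 1.75, so the IoU is 0.25 / 1.75 ~= 0.143:
# >>> polygon_iou(np.array([[0, 0], [1, 0], [1, 1], [0, 1]]),
# ...             np.array([[0.5, 0.5], [1.5, 0.5], [1.5, 1.5], [0.5, 1.5]]))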
def generate_polygon(cx, cy, ave_radius, irregularity, spikeyness, vertex_count):
"""
Start with the centre of the polygon at cx, cy,
then creates the polygon by sampling points on a circle around the centre.
Random noise is added by varying the angular spacing between sequential points,
and by varying the radial distance of each point from the centre.
Params:
cx, cy - coordinates of the "centre" of the polygon
ave_radius - in px, the average radius of this polygon, this roughly controls how large the polygon is,
really only useful for order of magnitude.
irregularity - [0,1] indicating how much variance there is in the angular spacing of vertices. [0,1] will map to
[0, 2 * pi / vertex_count]
spikeyness - [0,1] indicating how much variance there is in each vertex from the circle of radius ave_radius.
[0,1] will map to [0, ave_radius]
vertex_count - self-explanatory
Returns a list of vertices, in CCW order.
"""
irregularity = clip(irregularity, 0, 1) * 2 * math.pi / vertex_count
spikeyness = clip(spikeyness, 0, 1) * ave_radius
# generate n angle steps
angle_steps = []
lower = (2 * math.pi / vertex_count) - irregularity
upper = (2 * math.pi / vertex_count) + irregularity
angle_sum = 0
for i in range(vertex_count):
tmp = random.uniform(lower, upper)
angle_steps.append(tmp)
angle_sum = angle_sum + tmp
# normalize the steps so that point 0 and point n+1 are the same
k = angle_sum / (2 * math.pi)
for i in range(vertex_count):
angle_steps[i] = angle_steps[i] / k
# now generate the points
points = []
angle = random.uniform(0, 2 * math.pi)
for i in range(vertex_count):
r_i = clip(random.gauss(ave_radius, spikeyness), 0, 2 * ave_radius)
x = cx + r_i * math.cos(angle)
y = cy + r_i * math.sin(angle)
points.append((x, y))
angle = angle + angle_steps[i]
return points
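# Usage sketch (added note; parameter values are example assumptions):
# vertices = np.array(generate_polygon(cx=100, cy=100, ave_radius=50,
#                                      irregularity=0.3, spikeyness=0.2, vertex_count=8))
# -> a (8, 2) array of vertices lying roughly 50 px away from (100, 100)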
def clip(x, mini, maxi):
if mini > maxi:
return x
elif x < mini:
return mini
elif x > maxi:
return maxi
else:
return x
def scale_bounding_box(bounding_box, scale):
half_width = math.ceil((bounding_box[2] - bounding_box[0]) * scale / 2)
half_height = math.ceil((bounding_box[3] - bounding_box[1]) * scale / 2)
center = [round((bounding_box[0] + bounding_box[2]) / 2), round((bounding_box[1] + bounding_box[3]) / 2)]
scaled_bounding_box = [int(center[0] - half_width), int(center[1] - half_height), int(center[0] + half_width),
int(center[1] + half_height)]
return scaled_bounding_box
def pad_bounding_box(bbox, pad):
return [bbox[0] + pad, bbox[1] + pad, bbox[2] - pad, bbox[3] - pad]
def compute_bounding_box(polygon, scale=1, boundingbox_margin=0, fit=None):
# Compute base bounding box
bounding_box = [np.min(polygon[:, 0]), np.min(polygon[:, 1]), np.max(polygon[:, 0]), np.max(polygon[:, 1])]
# Scale
half_width = math.ceil((bounding_box[2] - bounding_box[0]) * scale / 2)
half_height = math.ceil((bounding_box[3] - bounding_box[1]) * scale / 2)
# Add margin
half_width += boundingbox_margin
half_height += boundingbox_margin
# Compute square bounding box
if fit == "square":
half_width = half_height = max(half_width, half_height)
center = [round((bounding_box[0] + bounding_box[2]) / 2), round((bounding_box[1] + bounding_box[3]) / 2)]
bounding_box = [int(center[0] - half_width), int(center[1] - half_height), int(center[0] + half_width),
int(center[1] + half_height)]
return bounding_box
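# Illustrative example (added note, example numbers): for the axis-aligned square
# [[10, 20], [30, 20], [30, 40], [10, 40]], scale=1 and boundingbox_margin=0 give back [10, 20, 30, 40]
# (mins then maxes of the two coordinate columns), while boundingbox_margin=5 expands it to [5, 15, 35, 45].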
def compute_patch(polygon, patch_size):
centroid = np.mean(polygon, axis=0)
half_height = half_width = patch_size / 2
bounding_box = [math.ceil(centroid[0] - half_width), math.ceil(centroid[1] - half_height),
math.ceil(centroid[0] + half_width), math.ceil(centroid[1] + half_height)]
return bounding_box
def bounding_box_within_bounds(bounding_box, bounds):
return bounds[0] <= bounding_box[0] and bounds[1] <= bounding_box[1] and bounding_box[2] <= bounds[2] and \
bounding_box[3] <= bounds[3]
def vertex_within_bounds(vertex, bounds):
return bounds[0] <= vertex[0] <= bounds[2] and \
bounds[1] <= vertex[1] <= bounds[3]
def edge_within_bounds(edge, bounds):
return vertex_within_bounds(edge[0], bounds) and vertex_within_bounds(edge[1], bounds)
def bounding_box_area(bounding_box):
return (bounding_box[2] - bounding_box[0]) * (bounding_box[3] - bounding_box[1])
def convert_to_image_patch_space(polygon_image_space, bounding_box):
polygon_image_patch_space = np.empty_like(polygon_image_space)
polygon_image_patch_space[:, 0] = polygon_image_space[:, 0] - bounding_box[0]
polygon_image_patch_space[:, 1] = polygon_image_space[:, 1] - bounding_box[1]
return polygon_image_patch_space
def strip_redundant_vertex(vertices, epsilon=1):
assert len(vertices.shape) == 2 # Is a polygon
new_vertices = vertices
if 1 < vertices.shape[0]:
if np.sum(np.absolute(vertices[0, :] - vertices[-1, :])) < epsilon:
new_vertices = vertices[:-1, :]
return new_vertices
def remove_doubles(vertices, epsilon=0.1):
dists = np.linalg.norm(np.roll(vertices, -1, axis=0) - vertices, axis=-1)
new_vertices = vertices[epsilon < dists]
return new_vertices
def simplify_polygon(polygon, tolerance=1):
approx_polygon = approximate_polygon(polygon, tolerance=tolerance)
return approx_polygon
def simplify_polygons(polygons, tolerance=1):
approx_polygons = []
for polygon in polygons:
approx_polygon = approximate_polygon(polygon, tolerance=tolerance)
approx_polygons.append(approx_polygon)
return approx_polygons
def pad_polygon(vertices, target_length):
assert len(vertices.shape) == 2 # Is a polygon
assert vertices.shape[0] <= target_length
padding_length = target_length - vertices.shape[0]
padding = np.tile(vertices[-1], [padding_length, 1])
padded_vertices = np.append(vertices, padding, axis=0)
return padded_vertices
def compute_diameter(polygon):
dist = scipy.spatial.distance.cdist(polygon, polygon)
return dist.max()
def plot_polygon(polygon, color=None, draw_labels=True, label_direction=1, indexing="xy", axis=None):
if python_utils.module_exists("matplotlib.pyplot"):
import matplotlib.pyplot as plt
if axis is None:
axis = plt.gca()
polygon_closed = np.append(polygon, [polygon[0, :]], axis=0)
if indexing == "xy=":
axis.plot(polygon_closed[:, 0], polygon_closed[:, 1], color=color, linewidth=3.0)
elif indexing == "ij":
axis.plot(polygon_closed[:, 1], polygon_closed[:, 0], color=color, linewidth=3.0)
else:
print("WARNING: Invalid indexing argument")
if draw_labels:
labels = range(1, polygon.shape[0] + 1)
for label, x, y in zip(labels, polygon[:, 0], polygon[:, 1]):
axis.annotate(
label,
xy=(x, y), xytext=(-20 * label_direction, 20 * label_direction),
textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.25', fc=color, alpha=0.75),
arrowprops=dict(arrowstyle='->', color=color, connectionstyle='arc3,rad=0'))
def plot_polygons(polygons, color=None, draw_labels=True, label_direction=1, indexing="xy", axis=None):
for polygon in polygons:
plot_polygon(polygon, color=color, draw_labels=draw_labels, label_direction=label_direction, indexing=indexing,
axis=axis)
def compute_edge_normal(edge):
normal = np.array([- (edge[1][1] - edge[0][1]),
edge[1][0] - edge[0][0]])
normal_norm = np.sqrt(np.sum(np.square(normal)))
normal /= normal_norm
return normal
def compute_vector_angle(x, y):
if x < 0.0:
slope = y / x
angle = np.pi + np.arctan(slope)
elif 0.0 < x:
slope = y / x
angle = np.arctan(slope)
else:
if 0 < y:
angle = np.pi / 2
else:
angle = 3 * np.pi / 2
if angle < 0.0:
angle += 2 * np.pi
return angle
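# Editor-added usage sketch (not part of the original repository): a quick sanity
# check of compute_vector_angle(), which returns an angle in [0, 2*pi). The helper
# name and the test values below are assumptions chosen for illustration.
def _example_compute_vector_angle():
    assert np.isclose(compute_vector_angle(1.0, 1.0), np.pi / 4)  # first quadrant
    assert np.isclose(compute_vector_angle(-1.0, 0.0), np.pi)  # negative x axis
    assert np.isclose(compute_vector_angle(1.0, -1.0), 7 * np.pi / 4)  # wrapped into [0, 2*pi)
    assert np.isclose(compute_vector_angle(0.0, -1.0), 3 * np.pi / 2)  # straight down (x == 0)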
def compute_edge_normal_angle_edge(edge):
normal = compute_edge_normal(edge)
normal_x = normal[1]
normal_y = normal[0]
angle = compute_vector_angle(normal_x, normal_y)
return angle
def polygon_in_bounding_box(polygon, bounding_box):
"""
Returns True if all vertices of polygons are inside bounding_box
:param polygon: [N, 2]
:param bounding_box: [row_min, col_min, row_max, col_max]
:return:
"""
result = np.all(
np.logical_and(
np.logical_and(bounding_box[0] <= polygon[:, 0], polygon[:, 0] <= bounding_box[2]),
np.logical_and(bounding_box[1] <= polygon[:, 1], polygon[:, 1] <= bounding_box[3])
)
)
return result
def filter_polygons_in_bounding_box(polygons, bounding_box):
"""
Only keep polygons that are fully inside bounding_box
:param polygons: [shape(N, 2), ...]
:param bounding_box: [row_min, col_min, row_max, col_max]
:return:
"""
filtered_polygons = []
for polygon in polygons:
if polygon_in_bounding_box(polygon, bounding_box):
filtered_polygons.append(polygon)
return filtered_polygons
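# Editor-added usage sketch (not part of the original repository): illustrates
# filter_polygons_in_bounding_box() with made-up polygons and a made-up
# [row_min, col_min, row_max, col_max] box.
def _example_filter_polygons_in_bounding_box():
    inside = np.array([[1.0, 1.0], [2.0, 1.0], [2.0, 2.0]])
    outside = inside + 100.0
    kept = filter_polygons_in_bounding_box([inside, outside], [0, 0, 10, 10])
    assert len(kept) == 1 and np.array_equal(kept[0], inside)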
def transform_polygon_to_bounding_box_space(polygon, bounding_box):
"""
:param polygon: shape(N, 2)
:param bounding_box: [row_min, col_min, row_max, col_max]
:return:
"""
    assert len(polygon.shape) == 2 and polygon.shape[1] == 2, "polygon should have shape (N, 2), not shape {}".format(
        polygon.shape)
assert len(bounding_box) == 4, "bounding_box should have 4 elements: [row_min, col_min, row_max, col_max]"
transformed_polygon = polygon.copy()
transformed_polygon[:, 0] -= bounding_box[0]
transformed_polygon[:, 1] -= bounding_box[1]
return transformed_polygon
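# Editor-added usage sketch (not part of the original repository):
# transform_polygon_to_bounding_box_space() subtracts the top-left corner of the
# bounding box, so a vertex on that corner maps to (0, 0). Values are made up.
def _example_transform_polygon_to_bounding_box_space():
    polygon = np.array([[10.0, 20.0], [15.0, 25.0]])
    transformed = transform_polygon_to_bounding_box_space(polygon, [10, 20, 30, 40])
    assert np.array_equal(transformed, np.array([[0.0, 0.0], [5.0, 5.0]]))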
def transform_polygons_to_bounding_box_space(polygons, bounding_box):
transformed_polygons = []
for polygon in polygons:
transformed_polygons.append(transform_polygon_to_bounding_box_space(polygon, bounding_box))
return transformed_polygons
def crop_polygon_to_patch(polygon, bounding_box):
return transform_polygon_to_bounding_box_space(polygon, bounding_box)
def crop_polygon_to_patch_if_touch(polygon, bounding_box):
# Verify that at least one vertex is inside bounding_box
polygon_touches_patch = np.any(
np.logical_and(
np.logical_and(bounding_box[0] <= polygon[:, 0], polygon[:, 0] <= bounding_box[2]),
np.logical_and(bounding_box[1] <= polygon[:, 1], polygon[:, 1] <= bounding_box[3])
)
)
if polygon_touches_patch:
return crop_polygon_to_patch(polygon, bounding_box)
else:
return None
def crop_polygons_to_patch_if_touch(polygons, bounding_box, return_indices=False):
if return_indices:
indices = []
cropped_polygons = []
for i, polygon in enumerate(polygons):
cropped_polygon = crop_polygon_to_patch_if_touch(polygon, bounding_box)
if cropped_polygon is not None:
cropped_polygons.append(cropped_polygon)
if return_indices:
indices.append(i)
if return_indices:
return cropped_polygons, indices
else:
return cropped_polygons
def crop_polygons_to_patch(polygons, bounding_box):
cropped_polygons = []
for polygon in polygons:
cropped_polygon = crop_polygon_to_patch(polygon, bounding_box)
if cropped_polygon is not None:
cropped_polygons.append(cropped_polygon)
return cropped_polygons
def polygon_remove_holes(polygon):
polygon_no_holes = []
for coords in polygon:
if not np.isnan(coords[0]) and not np.isnan(coords[1]):
polygon_no_holes.append(coords)
else:
break
return np.array(polygon_no_holes)
def polygons_remove_holes(polygons):
gt_polygons_no_holes = []
for polygon in polygons:
gt_polygons_no_holes.append(polygon_remove_holes(polygon))
return gt_polygons_no_holes
def apply_batch_disp_map_to_polygons(pred_disp_field_map_batch, disp_polygons_batch):
"""
:param pred_disp_field_map_batch: shape(batch_size, height, width, 2)
:param disp_polygons_batch: shape(batch_size, polygon_count, vertex_count, 2)
:return:
"""
# Apply all displacements at once
batch_count = pred_disp_field_map_batch.shape[0]
row_count = pred_disp_field_map_batch.shape[1]
col_count = pred_disp_field_map_batch.shape[2]
    disp_polygons_batch_int = np.round(disp_polygons_batch).astype(int)  # np.int is removed in recent NumPy versions
# Clip coordinates to the field map:
disp_polygons_batch_int_nearest_valid_field = np.maximum(0, disp_polygons_batch_int)
disp_polygons_batch_int_nearest_valid_field[:, :, :, 0] = np.minimum(
disp_polygons_batch_int_nearest_valid_field[:, :, :, 0], row_count - 1)
disp_polygons_batch_int_nearest_valid_field[:, :, :, 1] = np.minimum(
disp_polygons_batch_int_nearest_valid_field[:, :, :, 1], col_count - 1)
aligned_disp_polygons_batch = disp_polygons_batch.copy()
for batch_index in range(batch_count):
mask = ~np.isnan(disp_polygons_batch[batch_index, :, :, 0]) # Checking one coordinate is enough
aligned_disp_polygons_batch[batch_index, mask, 0] += pred_disp_field_map_batch[batch_index,
disp_polygons_batch_int_nearest_valid_field[
batch_index, mask, 0],
disp_polygons_batch_int_nearest_valid_field[
batch_index, mask, 1], 0].flatten()
aligned_disp_polygons_batch[batch_index, mask, 1] += pred_disp_field_map_batch[batch_index,
disp_polygons_batch_int_nearest_valid_field[
batch_index, mask, 0],
disp_polygons_batch_int_nearest_valid_field[
batch_index, mask, 1], 1].flatten()
return aligned_disp_polygons_batch
def apply_disp_map_to_polygons(disp_field_map, polygons):
"""
:param disp_field_map: shape(height, width, 2)
:param polygon_list: [shape(N, 2), shape(M, 2), ...]
:return:
"""
disp_field_map_batch = np.expand_dims(disp_field_map, axis=0)
disp_polygons = []
for polygon in polygons:
polygon_batch = np.expand_dims(np.expand_dims(polygon, axis=0), axis=0) # Add batch and polygon_count dims
disp_polygon_batch = apply_batch_disp_map_to_polygons(disp_field_map_batch, polygon_batch)
disp_polygon_batch = disp_polygon_batch[0, 0] # Remove batch and polygon_count dims
disp_polygons.append(disp_polygon_batch)
return disp_polygons
# This next function is somewhat redundant with apply_disp_map_to_polygons... (but displaces in the opposite direction)
def apply_displacement_field_to_polygons(polygons, disp_field_map):
disp_polygons = []
for polygon in polygons:
mask_nans = np.isnan(polygon) # Will be necessary when polygons with holes are handled
        polygon_int = np.round(polygon).astype(int)  # np.int is removed in recent NumPy versions
polygon_int_clipped = np.maximum(0, polygon_int)
polygon_int_clipped[:, 0] = np.minimum(disp_field_map.shape[0] - 1, polygon_int_clipped[:, 0])
polygon_int_clipped[:, 1] = np.minimum(disp_field_map.shape[1] - 1, polygon_int_clipped[:, 1])
disp_polygon = polygon.copy()
disp_polygon[~mask_nans[:, 0], 0] -= disp_field_map[polygon_int_clipped[~mask_nans[:, 0], 0],
polygon_int_clipped[~mask_nans[:, 0], 1], 0]
disp_polygon[~mask_nans[:, 1], 1] -= disp_field_map[polygon_int_clipped[~mask_nans[:, 1], 0],
polygon_int_clipped[~mask_nans[:, 1], 1], 1]
disp_polygons.append(disp_polygon)
return disp_polygons
def apply_displacement_fields_to_polygons(polygons, disp_field_maps):
disp_field_map_count = disp_field_maps.shape[0]
disp_polygons_list = []
for i in range(disp_field_map_count):
disp_polygons = apply_displacement_field_to_polygons(polygons, disp_field_maps[i, :, :, :])
disp_polygons_list.append(disp_polygons)
return disp_polygons_list
def draw_line(shape, line, width, blur_radius=0):
im = Image.new("L", (shape[1], shape[0]))
# im_px_access = im.load()
draw = ImageDraw.Draw(im)
vertex_list = []
for coords in line:
vertex = (coords[1], coords[0])
vertex_list.append(vertex)
draw.line(vertex_list, fill=255, width=width)
if 0 < blur_radius:
im = im.filter(ImageFilter.GaussianBlur(radius=blur_radius))
array = np.array(im) / 255
return array
def draw_triangle(shape, triangle, blur_radius=0):
im = Image.new("L", (shape[1], shape[0]))
# im_px_access = im.load()
draw = ImageDraw.Draw(im)
vertex_list = []
for coords in triangle:
vertex = (coords[1], coords[0])
vertex_list.append(vertex)
draw.polygon(vertex_list, fill=255)
if 0 < blur_radius:
im = im.filter(ImageFilter.GaussianBlur(radius=blur_radius))
array = np.array(im) / 255
return array
def draw_polygon(polygon, shape, fill=True, edges=True, vertices=True, line_width=3):
# TODO: handle holes in polygons
im = Image.new("RGB", (shape[1], shape[0]))
im_px_access = im.load()
draw = ImageDraw.Draw(im)
vertex_list = []
for coords in polygon:
vertex = (coords[1], coords[0])
if not np.isnan(vertex[0]) and not np.isnan(vertex[1]):
vertex_list.append(vertex)
else:
break
if edges:
draw.line(vertex_list, fill=(0, 255, 0), width=line_width)
if fill:
draw.polygon(vertex_list, fill=(255, 0, 0))
if vertices:
draw.point(vertex_list, fill=(0, 0, 255))
# Convert image to numpy array with the right number of channels
array = np.array(im)
selection = [fill, edges, vertices]
selected_array = array[:, :, selection]
return selected_array
def draw_polygons(polygons, shape, fill=True, edges=True, vertices=True, line_width=3):
# TODO: handle holes in polygons
# Channels
fill_channel_index = 0 # Always first channel
edges_channel_index = fill # If fill == True, take second channel. If not then take first
vertices_channel_index = fill + edges # Same principle as above
channel_count = fill + edges + vertices
im_draw_list = []
for channel_index in range(channel_count):
im = Image.new("L", (shape[1], shape[0]))
im_px_access = im.load()
draw = ImageDraw.Draw(im)
im_draw_list.append((im, draw))
for polygon in polygons:
vertex_list = []
for coords in polygon:
vertex = (coords[1], coords[0])
if not np.isnan(vertex[0]) and not np.isnan(vertex[1]):
vertex_list.append(vertex)
else:
break
if fill:
draw = im_draw_list[fill_channel_index][1]
draw.polygon(vertex_list, fill=255)
if edges:
draw = im_draw_list[edges_channel_index][1]
draw.line(vertex_list, fill=255, width=line_width)
if vertices:
draw = im_draw_list[vertices_channel_index][1]
draw.point(vertex_list, fill=255)
# Convert image to numpy array with the right number of channels
array_list = [np.array(im_draw[0]) for im_draw in im_draw_list]
array = np.stack(array_list, axis=-1)
return array
def draw_polygon_map(polygons, shape, fill=True, edges=True, vertices=True, line_width=3):
"""
Alias for draw_polygon function
:param polygons:
:param shape:
:param fill:
:param edges:
:param vertices:
:param line_width:
:return:
"""
return draw_polygons(polygons, shape, fill=fill, edges=edges, vertices=vertices, line_width=line_width)
def draw_polygon_maps(polygons_list, shape, fill=True, edges=True, vertices=True, line_width=3):
polygon_maps_list = []
for polygons in polygons_list:
polygon_map = draw_polygon_map(polygons, shape, fill=fill, edges=edges, vertices=vertices,
line_width=line_width)
polygon_maps_list.append(polygon_map)
disp_field_maps = np.stack(polygon_maps_list, axis=0)
return disp_field_maps
def swap_coords(polygon):
polygon_new = polygon.copy()
polygon_new[..., 0] = polygon[..., 1]
polygon_new[..., 1] = polygon[..., 0]
return polygon_new
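# Editor-added usage sketch (not part of the original repository): swap_coords()
# exchanges the two coordinate columns (ij <-> xy). Values are made up.
def _example_swap_coords():
    polygon_ij = np.array([[1.0, 2.0], [3.0, 4.0]])
    assert np.array_equal(swap_coords(polygon_ij), np.array([[2.0, 1.0], [4.0, 3.0]]))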
def prepare_polygons_for_tfrecord(gt_polygons, disp_polygons_list, boundingbox=None):
assert len(gt_polygons)
# print("Starting to crop polygons")
# start = time.time()
dtype = gt_polygons[0].dtype
cropped_gt_polygons = []
cropped_disp_polygons_list = [[] for i in range(len(disp_polygons_list))]
polygon_length = 0
for polygon_index, gt_polygon in enumerate(gt_polygons):
if boundingbox is not None:
cropped_gt_polygon = crop_polygon_to_patch_if_touch(gt_polygon, boundingbox)
else:
cropped_gt_polygon = gt_polygon
if cropped_gt_polygon is not None:
cropped_gt_polygons.append(cropped_gt_polygon)
if polygon_length < cropped_gt_polygon.shape[0]:
polygon_length = cropped_gt_polygon.shape[0]
# Crop disp polygons
for disp_index, disp_polygons in enumerate(disp_polygons_list):
disp_polygon = disp_polygons[polygon_index]
if boundingbox is not None:
cropped_disp_polygon = crop_polygon_to_patch(disp_polygon, boundingbox)
else:
cropped_disp_polygon = disp_polygon
cropped_disp_polygons_list[disp_index].append(cropped_disp_polygon)
# end = time.time()
# print("Finished cropping polygons in in {}s".format(end - start))
#
# print("Starting to pad polygons")
# start = time.time()
polygon_count = len(cropped_gt_polygons)
if polygon_count:
# Add +1 to both dimensions for end-of-item NaNs
padded_gt_polygons = np.empty((polygon_count + 1, polygon_length + 1, 2), dtype=dtype)
padded_gt_polygons[:, :, :] = np.nan
padded_disp_polygons_array = np.empty((len(disp_polygons_list), polygon_count + 1, polygon_length + 1, 2),
dtype=dtype)
padded_disp_polygons_array[:, :, :] = np.nan
for i, polygon in enumerate(cropped_gt_polygons):
padded_gt_polygons[i, 0:polygon.shape[0], :] = polygon
for j, polygons in enumerate(cropped_disp_polygons_list):
for i, polygon in enumerate(polygons):
padded_disp_polygons_array[j, i, 0:polygon.shape[0], :] = polygon
else:
padded_gt_polygons = padded_disp_polygons_array = None
# end = time.time()
# print("Finished padding polygons in in {}s".format(end - start))
return padded_gt_polygons, padded_disp_polygons_array
def prepare_stages_polygons_for_tfrecord(gt_polygons, disp_polygons_list_list, boundingbox):
assert len(gt_polygons)
# print("Starting to crop polygons")
# start = time.time()
dtype = gt_polygons[0].dtype
cropped_gt_polygons = []
cropped_disp_polygons_list_list = [[[] for i in range(len(disp_polygons_list))] for disp_polygons_list in
disp_polygons_list_list]
polygon_length = 0
for polygon_index, gt_polygon in enumerate(gt_polygons):
cropped_gt_polygon = crop_polygon_to_patch_if_touch(gt_polygon, boundingbox)
if cropped_gt_polygon is not None:
cropped_gt_polygons.append(cropped_gt_polygon)
if polygon_length < cropped_gt_polygon.shape[0]:
polygon_length = cropped_gt_polygon.shape[0]
# Crop disp polygons
for stage_index, disp_polygons_list in enumerate(disp_polygons_list_list):
for disp_index, disp_polygons in enumerate(disp_polygons_list):
disp_polygon = disp_polygons[polygon_index]
cropped_disp_polygon = crop_polygon_to_patch(disp_polygon, boundingbox)
cropped_disp_polygons_list_list[stage_index][disp_index].append(cropped_disp_polygon)
# end = time.time()
# print("Finished cropping polygons in in {}s".format(end - start))
#
# print("Starting to pad polygons")
# start = time.time()
polygon_count = len(cropped_gt_polygons)
if polygon_count:
# Add +1 to both dimensions for end-of-item NaNs
padded_gt_polygons = np.empty((polygon_count + 1, polygon_length + 1, 2), dtype=dtype)
padded_gt_polygons[:, :, :] = np.nan
padded_disp_polygons_array = np.empty(
(len(disp_polygons_list_list), len(disp_polygons_list_list[0]), polygon_count + 1, polygon_length + 1, 2),
dtype=dtype)
padded_disp_polygons_array[:, :, :] = np.nan
for i, polygon in enumerate(cropped_gt_polygons):
padded_gt_polygons[i, 0:polygon.shape[0], :] = polygon
for k, cropped_disp_polygons_list in enumerate(cropped_disp_polygons_list_list):
for j, polygons in enumerate(cropped_disp_polygons_list):
for i, polygon in enumerate(polygons):
padded_disp_polygons_array[k, j, i, 0:polygon.shape[0], :] = polygon
else:
padded_gt_polygons = padded_disp_polygons_array = None
# end = time.time()
# print("Finished padding polygons in in {}s".format(end - start))
return padded_gt_polygons, padded_disp_polygons_array
def rescale_polygon(polygons, scaling_factor):
"""
:param polygons:
:return: scaling_factor
"""
if len(polygons):
rescaled_polygons = [polygon * scaling_factor for polygon in polygons]
return rescaled_polygons
else:
return polygons
def get_edge_center(edge):
return np.mean(edge, axis=0)
def get_edge_length(edge):
return np.sqrt(np.sum(np.square(edge[0] - edge[1])))
def get_edges_angle(edge1, edge2):
x1 = edge1[1, 0] - edge1[0, 0]
y1 = edge1[1, 1] - edge1[0, 1]
x2 = edge2[1, 0] - edge2[0, 0]
y2 = edge2[1, 1] - edge2[0, 1]
angle1 = compute_vector_angle(x1, y1)
angle2 = compute_vector_angle(x2, y2)
edges_angle = math.fabs(angle1 - angle2) % (2 * math.pi)
if math.pi < edges_angle:
edges_angle = 2 * math.pi - edges_angle
return edges_angle
def compute_angle_two_points(point_source, point_target):
vector = point_target - point_source
angle = compute_vector_angle(vector[0], vector[1])
return angle
def compute_angle_three_points(point_source, point_target1, point_target2):
squared_dist_source_target1 = math.pow((point_source[0] - point_target1[0]), 2) + math.pow(
(point_source[1] - point_target1[1]), 2)
squared_dist_source_target2 = math.pow((point_source[0] - point_target2[0]), 2) + math.pow(
(point_source[1] - point_target2[1]), 2)
squared_dist_target1_target2 = math.pow((point_target1[0] - point_target2[0]), 2) + math.pow(
(point_target1[1] - point_target2[1]), 2)
dist_source_target1 = math.sqrt(squared_dist_source_target1)
dist_source_target2 = math.sqrt(squared_dist_source_target2)
try:
cos = (squared_dist_source_target1 + squared_dist_source_target2 - squared_dist_target1_target2) / (
2 * dist_source_target1 * dist_source_target2)
except ZeroDivisionError:
return float('inf')
cos = max(min(cos, 1),
-1) # Avoid some math domain error due to cos being slightly bigger than 1 (from floating point operations)
angle = math.acos(cos)
return angle
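# Editor-added usage sketch (not part of the original repository): the angle at the
# source point between two orthogonal unit directions is pi / 2. Values are made up.
def _example_compute_angle_three_points():
    angle = compute_angle_three_points(np.array([0.0, 0.0]),
                                       np.array([1.0, 0.0]),
                                       np.array([0.0, 1.0]))
    assert np.isclose(angle, np.pi / 2)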
def are_edges_overlapping(edge1, edge2, threshold):
"""
Checks if at least 2 different vertices of either edge lies on the other edge: it characterizes an overlap
:param edge1:
:param edge2:
:param threshold:
:return:
"""
count_list = [
is_vertex_on_edge(edge1[0], edge2, threshold),
is_vertex_on_edge(edge1[1], edge2, threshold),
is_vertex_on_edge(edge2[0], edge1, threshold),
is_vertex_on_edge(edge2[1], edge1, threshold),
]
# Count number of identical vertices
identical_vertex_list = [
np.array_equal(edge1[0], edge2[0]),
np.array_equal(edge1[0], edge2[1]),
np.array_equal(edge1[1], edge2[0]),
np.array_equal(edge1[1], edge2[1]),
]
adjusted_count = np.sum(count_list) - np.sum(identical_vertex_list)
return 2 <= adjusted_count
# def are_edges_collinear(edge1, edge2, angle_threshold):
# edges_angle = get_edges_angle(edge1, edge2)
# return edges_angle < angle_threshold
def get_line_intersect(a1, a2, b1, b2):
"""
Returns the point of intersection of the lines passing through a2,a1 and b2,b1.
a1: [x, y] a point on the first line
a2: [x, y] another point on the first line
b1: [x, y] a point on the second line
b2: [x, y] another point on the second line
"""
s = np.vstack([a1, a2, b1, b2]) # s for stacked
h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous
l1 = np.cross(h[0], h[1]) # get first line
l2 = np.cross(h[2], h[3]) # get second line
x, y, z = np.cross(l1, l2) # point of intersection
if z == 0: # lines are parallel
return float('inf'), float('inf')
return x / z, y / z
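# Editor-added usage sketch (not part of the original repository): the two diagonals
# of the unit square cross at (0.5, 0.5), while parallel lines return infinities.
# The helper name and the coordinates are assumptions chosen for illustration.
def _example_get_line_intersect():
    x, y = get_line_intersect([0, 0], [1, 1], [0, 1], [1, 0])
    assert np.isclose(x, 0.5) and np.isclose(y, 0.5)
    assert get_line_intersect([0, 0], [1, 0], [0, 1], [1, 1]) == (float('inf'), float('inf'))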
def are_edges_intersecting(edge1, edge2, epsilon=1e-6):
"""
edge1 and edge2 should not have a common vertex between them
:param edge1:
:param edge2:
:return:
"""
intersect = get_line_intersect(edge1[0], edge1[1], edge2[0], edge2[1])
# print(edge1)
# print(edge2)
# print(intersect)
if intersect[0] == float('inf') or intersect[1] == float('inf'):
# Lines don't intersect
return False
else:
# Lines intersect
# Check if intersect point belongs to both edges
angle1 = compute_angle_three_points(intersect, edge1[0], edge1[1])
angle2 = compute_angle_three_points(intersect, edge2[0], edge2[1])
intersect_belongs_to_edges = (math.pi - epsilon) < angle1 and (math.pi - epsilon) < angle2
return intersect_belongs_to_edges
def shorten_edge(edge, length_to_cut1, length_to_cut2, min_length):
center = get_edge_center(edge)
total_length = get_edge_length(edge)
new_length = total_length - length_to_cut1 - length_to_cut2
if min_length <= new_length:
scale = new_length / total_length
new_edge = (edge.copy() - center) * scale + center
return new_edge
else:
return None
def is_edge_in_triangle(edge, triangle):
return edge[0] in triangle and edge[1] in triangle
def get_connectivity_of_edge(edge, triangles):
connectivity = 0
for triangle in triangles:
connectivity += is_edge_in_triangle(edge, triangle)
return connectivity
def get_connectivity_of_edges(edges, triangles):
connectivity_of_edges = []
for edge in edges:
connectivity_of_edge = get_connectivity_of_edge(edge, triangles)
connectivity_of_edges.append(connectivity_of_edge)
return connectivity_of_edges
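# Editor-added usage sketch (not part of the original repository): edges and triangles
# are given as tuples of vertex indices; the connectivity of an edge is the number of
# triangles containing both its vertices. Values are made up.
def _example_get_connectivity_of_edges():
    triangles = [(0, 1, 2), (1, 2, 3)]
    assert get_connectivity_of_edges([(1, 2), (0, 2), (0, 3)], triangles) == [2, 1, 0]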
def polygon_to_closest_int(polygons):
int_polygons = []
for polygon in polygons:
int_polygon = np.round(polygon)
int_polygons.append(int_polygon)
return int_polygons
def is_vertex_on_edge(vertex, edge, threshold):
"""
:param vertex:
:param edge:
:param threshold:
:return:
"""
# Compare distances sum to edge length
edge_length = get_edge_length(edge)
dist1 = get_edge_length([vertex, edge[0]])
dist2 = get_edge_length([vertex, edge[1]])
vertex_on_edge = (dist1 + dist2) < (edge_length + threshold)
return vertex_on_edge
def get_face_edges(face_vertices):
edges = []
prev_vertex = face_vertices[0]
for vertex in face_vertices[1:]:
edge = (prev_vertex, vertex)
edges.append(edge)
# For next iteration:
prev_vertex = vertex
return edges
def find_edge_in_face(edge, face_vertices):
# Copy inputs list so that we don't modify it
face_vertices = face_vertices[:]
face_vertices.append(face_vertices[0]) # Close face (does not matter if it is already closed)
edges = get_face_edges(face_vertices)
index = edges.index(edge)
return index
def clean_degenerate_face_edges(face_vertices):
def recursive_clean_degenerate_face_edges(open_face_vertices):
face_vertex_count = len(open_face_vertices)
cleaned_open_face_vertices = []
skip = False
for index in range(face_vertex_count):
if skip:
skip = False
else:
prev_vertex = open_face_vertices[(index - 1) % face_vertex_count]
vertex = open_face_vertices[index]
next_vertex = open_face_vertices[(index + 1) % face_vertex_count]
if prev_vertex != next_vertex:
cleaned_open_face_vertices.append(vertex)
else:
skip = True
if len(cleaned_open_face_vertices) < face_vertex_count:
return recursive_clean_degenerate_face_edges(cleaned_open_face_vertices)
else:
return cleaned_open_face_vertices
open_face_vertices = face_vertices[:-1]
cleaned_face_vertices = recursive_clean_degenerate_face_edges(open_face_vertices)
# Close cleaned_face_vertices
cleaned_face_vertices.append(cleaned_face_vertices[0])
return cleaned_face_vertices
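# Editor-added usage sketch (not part of the original repository), based on the example
# commented out in the __main__ block below: the spur 4-5-4 and the dangling 3-4-3 edge
# it leaves behind are both removed, and the face stays closed.
def _example_clean_degenerate_face_edges():
    assert clean_degenerate_face_edges([1, 2, 3, 4, 5, 4, 3, 6, 1]) == [1, 2, 3, 6, 1]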
def merge_vertices(main_face_vertices, extra_face_vertices, common_edge):
sorted_common_edge = tuple(sorted(common_edge))
open_face_vertices_pair = (main_face_vertices[:-1], extra_face_vertices[:-1])
face_index = 0 # 0: current_face == main_face, 1: current_face == extra_face
vertex_index = 0
start_vertex = vertex = open_face_vertices_pair[face_index][vertex_index]
merged_face_vertices = [start_vertex]
faces_merged = False
while not faces_merged:
# Get next vertex
next_vertex_index = (vertex_index + 1) % len(open_face_vertices_pair[face_index])
next_vertex = open_face_vertices_pair[face_index][next_vertex_index]
edge = (vertex, next_vertex)
sorted_edge = tuple(sorted(edge))
if sorted_edge == sorted_common_edge:
# Switch current face
face_index = 1 - face_index
# Find vertex_index in new current face
reverse_edge = (edge[1], edge[0]) # Because we are now on the other face
edge_index = find_edge_in_face(reverse_edge, open_face_vertices_pair[face_index])
vertex_index = edge_index + 1 # Index of the second vertex of edge
# vertex_index = open_face_vertices_pair[face_index].index(vertex)
vertex_index = (vertex_index + 1) % len(open_face_vertices_pair[face_index])
vertex = open_face_vertices_pair[face_index][vertex_index]
merged_face_vertices.append(vertex)
faces_merged = vertex == start_vertex # This also makes the merged_face closed
# Remove degenerate face edges (edges where the face if on both sides of it)
cleaned_merged_face_vertices = clean_degenerate_face_edges(merged_face_vertices)
return cleaned_merged_face_vertices
if __name__ == "__main__":
# polygon = np.array([
# [0, 0],
# [1, 0],
# [1, 1],
# [np.nan, np.nan],
# [0, 0],
# [1, 0],
# [1, 1],
# [np.nan, np.nan],
# ], dtype=np.float32)
# polygons = [
# polygon.copy(),
# polygon.copy(),
# polygon.copy(),
# polygon.copy() + 100,
# ]
#
# bounding_box = [10, 10, 100, 100] # Top left corner x, y, bottom right corner x, y
#
# cropped_polygons = crop_polygons_to_patch(polygons, bounding_box)
# print(cropped_polygons)
# edge1 = np.array([
# [0, 0],
# [1, 0],
# ])
# edge2 = np.array([
# [1, 0],
# [2, 0],
# ])
# edge_radius = 0.1
# edges_overlapping = are_edges_overlapping(edge1, edge2, edge_radius)
# print("edges_overlapping:")
# print(edges_overlapping)
face_vertices = [215, 238, 220, 201, 193, 194, 195, 199, 213, 219, 235, 238, 215]
# face_vertices = [1, 2, 3, 4, 5, 4, 3, 6, 1]
print(face_vertices)
cleaned_face_vertices = clean_degenerate_face_edges(face_vertices)
print(cleaned_face_vertices)
| 42,079 | 36.437722 | 131 | py |
mapalignment | mapalignment-master/projects/utils/geo_utils.py | import numpy as np
import time
import json
import os.path
from osgeo import gdal, ogr
from osgeo import osr
import overpy
# from fiona.crs import from_epsg
# import fiona
from pyproj import Proj, transform
import polygon_utils
import math_utils
import print_utils
QUERY_BASE = \
"""
<osm-script timeout="900" element-limit="1073741824">
<union>
<query type="way">
<has-kv k="{0}"/>
<bbox-query s="{1}" w="{2}" n="{3}" e="{4}"/>
</query>
<recurse type="way-node" into="nodes"/>
</union>
<print/>
</osm-script>
"""
WGS84_WKT = """
GEOGCS["GCS_WGS_1984",
DATUM["WGS_1984",
SPHEROID["WGS_84",6378137,298.257223563]],
PRIMEM["Greenwich",0],
UNIT["Degree",0.017453292519943295]]
"""
CRS = {'no_defs': True, 'ellps': 'WGS84', 'datum': 'WGS84', 'proj': 'longlat'}
def get_coor_in_space(image_filepath):
"""
:param image_filepath: Path to geo-referenced tif image
:return: coor in original space and in wsg84 spatial reference and original geotransform
:return: geo transform (x_min, res, 0, y_max, 0, -res)
:return: [[OR_x_min,OR_y_min,OR_x_max,OR_y_max],[TR_x_min,TR_y_min,TR_x_max,TR_y_max]]
"""
# print(" get_coor_in_space(image_filepath)")
ds = gdal.Open(image_filepath)
width = ds.RasterXSize
height = ds.RasterYSize
gt = ds.GetGeoTransform()
x_min = gt[0]
y_min = gt[3] + width * gt[4] + height * gt[5]
x_max = gt[0] + width * gt[1] + height * gt[2]
y_max = gt[3]
prj = ds.GetProjection()
srs = osr.SpatialReference(wkt=prj)
coor_sys = srs.GetAttrValue("PROJCS|AUTHORITY", 1)
if coor_sys is None:
coor_sys = srs.GetAttrValue("GEOGCS|AUTHORITY", 1)
new_cs = osr.SpatialReference()
new_cs.ImportFromWkt(WGS84_WKT)
# print(srs, new_cs)
transform = osr.CoordinateTransformation(srs, new_cs)
lat_long_min = transform.TransformPoint(x_min, y_min)
lat_long_max = transform.TransformPoint(x_max, y_max)
coor = [[x_min, y_min, x_max, y_max], [lat_long_min[0], lat_long_min[1], lat_long_max[0], lat_long_max[1]]]
return coor, gt, coor_sys
def get_osm_data(coor_query):
"""
:param coor_query: [x_min, min_z, x_max, y_max]
:return: OSM query result
"""
api = overpy.Overpass()
query_buildings = QUERY_BASE.format("building", coor_query[1], coor_query[0], coor_query[3], coor_query[2])
query_successful = False
wait_duration = 60
result = None
while not query_successful:
try:
result = api.query(query_buildings)
query_successful = True
        except (overpy.exception.OverpassGatewayTimeout, overpy.exception.OverpassTooManyRequests, ConnectionResetError):
print("OSM server overload. Waiting for {} seconds before querying again...".format(wait_duration))
time.sleep(wait_duration)
wait_duration *= 2 # Multiply wait time by 2 for the next time
return result
def proj_to_epsg_space(nodes, coor_sys):
original = Proj(CRS)
destination = Proj(init='EPSG:{}'.format(coor_sys))
polygon = []
for node in nodes:
polygon.append(transform(original, destination, node.lon, node.lat))
return np.array(polygon)
def compute_epsg_to_image_mat(coor, gt):
x_min = coor[0][0]
y_max = coor[0][3]
transform_mat = np.array([
[gt[1], 0, 0],
[0, gt[5], 0],
[x_min, y_max, 1],
])
return np.linalg.inv(transform_mat)
def compute_image_to_epsg_mat(coor, gt):
x_min = coor[0][0]
y_max = coor[0][3]
transform_mat = np.array([
[gt[1], 0, 0],
[0, gt[5], 0],
[x_min, y_max, 1],
])
return transform_mat
def apply_transform_mat(polygon_epsg_space, transform_mat):
polygon_epsg_space_homogeneous = math_utils.to_homogeneous(polygon_epsg_space)
polygon_image_space_homogeneous = np.matmul(polygon_epsg_space_homogeneous, transform_mat)
polygon_image_space = math_utils.to_euclidian(polygon_image_space_homogeneous)
return polygon_image_space
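# Editor-added usage sketch (not part of the original repository): with a hypothetical
# geotransform (origin (100, 500), 0.5 m pixels), the two matrices above are inverses
# of each other and the EPSG-space origin corner maps to pixel (0, 0). All values are
# made up; the second element of `coor` (lat/long bounds) is unused here.
def _example_epsg_image_transforms():
    coor = [[100.0, 480.0, 120.0, 500.0], [0.0, 0.0, 0.0, 0.0]]
    gt = (100.0, 0.5, 0.0, 500.0, 0.0, -0.5)
    epsg_to_image = compute_epsg_to_image_mat(coor, gt)
    image_to_epsg = compute_image_to_epsg_mat(coor, gt)
    assert np.allclose(np.matmul(image_to_epsg, epsg_to_image), np.eye(3))
    corner_image_space = apply_transform_mat(np.array([[100.0, 500.0]]), epsg_to_image)
    assert np.allclose(corner_image_space, np.array([[0.0, 0.0]]))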
def get_polygons_from_osm(image_filepath, tag=""):
coor, gt, coor_system = get_coor_in_space(image_filepath)
transform_mat = compute_epsg_to_image_mat(coor, gt)
osm_data = get_osm_data(coor[1])
polygons = []
for way in osm_data.ways:
if way.tags.get(tag, "n/a") != 'n/a':
# polygon = way.nodes[:-1] # Start and end vertex are the same so remove the end vertex
polygon = way.nodes
polygon_epsg_space = proj_to_epsg_space(polygon, coor_system)
polygon_image_space = apply_transform_mat(polygon_epsg_space, transform_mat)
polygon_image_space = polygon_utils.swap_coords(polygon_image_space)
polygons.append(polygon_image_space)
return polygons
def get_polygons_from_shapefile(image_filepath, input_shapefile_filepath):
coor, gt, coor_system = get_coor_in_space(image_filepath)
transform_mat = compute_epsg_to_image_mat(coor, gt)
file = ogr.Open(input_shapefile_filepath)
assert file is not None, "File {} does not exist!".format(input_shapefile_filepath)
shape = file.GetLayer(0)
feature_count = shape.GetFeatureCount()
polygons = []
properties_list = []
for feature_index in range(feature_count):
feature = shape.GetFeature(feature_index)
raw_json = feature.ExportToJson()
parsed_json = json.loads(raw_json)
# Extract polygon:
polygon = np.array(parsed_json["geometry"]["coordinates"][0])
assert len(polygon.shape) == 2, "polygon should have shape (n, d)"
if 2 < polygon.shape[1]:
print_utils.print_warning("WARNING: polygon from shapefile has shape {}. Will discard extra values to have polygon with shape ({}, 2)".format(polygon.shape, polygon.shape[0]))
polygon = polygon[:, :2]
polygon_epsg_space = polygon
polygon_image_space = apply_transform_mat(polygon_epsg_space, transform_mat)
polygon_image_space = polygon_utils.swap_coords(polygon_image_space)
polygons.append(polygon_image_space)
# Extract properties:
if "properties" in parsed_json:
properties = parsed_json["properties"]
properties_list.append(properties)
if properties_list:
return polygons, properties_list
else:
return polygons
def create_ogr_polygon(polygon, transform_mat):
polygon_swapped_coords = polygon_utils.swap_coords(polygon)
polygon_epsg = apply_transform_mat(polygon_swapped_coords, transform_mat)
ring = ogr.Geometry(ogr.wkbLinearRing)
for coord in polygon_epsg:
ring.AddPoint(coord[0], coord[1])
# Create polygon
poly = ogr.Geometry(ogr.wkbPolygon)
poly.AddGeometry(ring)
return poly.ExportToWkt()
def create_ogr_polygons(polygons, transform_mat):
ogr_polygons = []
for polygon in polygons:
ogr_polygons.append(create_ogr_polygon(polygon, transform_mat))
return ogr_polygons
def save_shapefile_from_polygons(polygons, image_filepath, output_shapefile_filepath, properties_list=None):
"""
https://gis.stackexchange.com/a/52708/8104
"""
if properties_list is not None:
assert len(polygons) == len(properties_list), "polygons and properties_list should have the same length"
coor, gt, coor_system = get_coor_in_space(image_filepath)
transform_mat = compute_image_to_epsg_mat(coor, gt)
# Convert polygons to ogr_polygons
ogr_polygons = create_ogr_polygons(polygons, transform_mat)
driver = ogr.GetDriverByName('Esri Shapefile')
ds = driver.CreateDataSource(output_shapefile_filepath)
# create the spatial reference, WGS84
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
layer = ds.CreateLayer('', None, ogr.wkbPolygon)
# Add one attribute
field_name_list = []
field_type_list = []
if properties_list is not None:
for properties in properties_list:
for (key, value) in properties.items():
if key not in field_name_list:
field_name_list.append(key)
field_type_list.append(type(value))
for (name, py_type) in zip(field_name_list, field_type_list):
if py_type == int:
ogr_type = ogr.OFTInteger
        elif py_type == float:
            ogr_type = ogr.OFTReal
elif py_type == str:
ogr_type = ogr.OFTString
else:
ogr_type = ogr.OFTInteger
layer.CreateField(ogr.FieldDefn(name, ogr_type))
defn = layer.GetLayerDefn()
for index in range(len(ogr_polygons)):
ogr_polygon = ogr_polygons[index]
if properties_list is not None:
properties = properties_list[index]
else:
properties = {}
# Create a new feature (attribute and geometry)
feat = ogr.Feature(defn)
for (key, value) in properties.items():
feat.SetField(key, value)
# Make a geometry, from Shapely object
geom = ogr.CreateGeometryFromWkt(ogr_polygon)
feat.SetGeometry(geom)
layer.CreateFeature(feat)
feat = geom = None # destroy these
# Save and close everything
ds = layer = feat = geom = None
def indices_of_biggest_intersecting_polygon(polygon_list):
"""
Assumes polygons which intersect follow each other on the order given by polygon_list.
This avoids the huge complexity of looking for an intersection between every polygon.
:param ori_gt_polygons:
:return:
"""
keep_index_list = []
current_cluster = [] # Indices of the polygons belonging to the current cluster (their union has one component)
for index, polygon in enumerate(polygon_list):
# First, check if polygon intersects with current_cluster:
current_cluster_polygons = [polygon_list[index] for index in current_cluster]
is_intersection = polygon_utils.check_intersection_with_polygons(polygon, current_cluster_polygons)
if is_intersection:
# Just add polygon to the cluster, nothing else to do
current_cluster.append(index)
else:
# This mean the current polygon is part of the next cluster.
# First, find the biggest polygon in the current cluster
cluster_max_index = 0
cluster_max_area = 0
for cluster_polygon_index in current_cluster:
cluster_polygon = polygon_list[cluster_polygon_index]
area = polygon_utils.polygon_area(cluster_polygon)
if cluster_max_area < area:
cluster_max_area = area
cluster_max_index = cluster_polygon_index
# Add index of the biggest polygon to the keep_index_list:
keep_index_list.append(cluster_max_index)
# Second, create a new cluster with the current polygon index
current_cluster = [index]
return keep_index_list
def get_pixelsize(filepath):
raster = gdal.Open(filepath)
gt = raster.GetGeoTransform()
pixelsize_x = gt[1]
pixelsize_y = -gt[5]
pixelsize = (pixelsize_x + pixelsize_y) / 2
return pixelsize
def main():
main_dirpath = "/workspace/data/stereo_dataset/raw/leibnitz"
image_filepath = os.path.join(main_dirpath, "leibnitz_ortho_ref_RGB.tif")
input_shapefile_filepath = os.path.join(main_dirpath, "Leibnitz_buildings_ref.shp")
output_shapefile_filepath = os.path.join(main_dirpath, "Leibnitz_buildings_ref.shifted.shp")
polygons, properties_list = get_polygons_from_shapefile(image_filepath, input_shapefile_filepath)
print(polygons[0])
print(properties_list[0])
# Add shift
shift = np.array([0, 0])
shifted_polygons = [polygon + shift for polygon in polygons]
print(shifted_polygons[0])
# Save shapefile
save_shapefile_from_polygons(shifted_polygons, image_filepath, output_shapefile_filepath, properties_list=properties_list)
if __name__ == "__main__":
main()
| 12,156 | 32.86351 | 187 | py |
mapalignment | mapalignment-master/projects/utils/math_utils.py | import numpy as np
import time
import sklearn.datasets
import skimage.transform
import python_utils
import image_utils
# if python_utils.module_exists("matplotlib.pyplot"):
# import matplotlib.pyplot as plt
CV2 = False
if python_utils.module_exists("cv2"):
import cv2
CV2 = True
# import multiprocessing
# import python_utils
# if python_utils.module_exists("joblib"):
# from joblib import Parallel, delayed
# JOBLIB = True
# else:
# JOBLIB = False
# def plot_field_map(field_map):
# from mpl_toolkits.mplot3d import Axes3D
# row = np.linspace(0, 1, field_map.shape[0])
# col = np.linspace(0, 1, field_map.shape[1])
# rr, cc = np.meshgrid(row, col, indexing='ij')
# fig = plt.figure(figsize=(18, 9))
# ax = fig.add_subplot(121, projection='3d')
# ax.plot_surface(rr, cc, field_map[:, :, 0], rstride=3, cstride=3, linewidth=1, antialiased=True)
# ax = fig.add_subplot(122, projection='3d')
# ax.plot_surface(rr, cc, field_map[:, :, 1], rstride=3, cstride=3, linewidth=1, antialiased=True)
# plt.show()
class DispFieldMapsPatchCreator:
def __init__(self, global_shape, patch_res, map_count, modes, gauss_mu_range, gauss_sig_scaling):
self.global_shape = global_shape
self.patch_res = patch_res
self.map_count = map_count
self.modes = modes
self.gauss_mu_range = gauss_mu_range
self.gauss_sig_scaling = gauss_sig_scaling
self.current_patch_index = -1
self.patch_boundingboxes = image_utils.compute_patch_boundingboxes(self.global_shape, stride=self.patch_res, patch_res=self.patch_res)
self.disp_maps = None
self.create_new_disp_maps()
def create_new_disp_maps(self):
print("DispFieldMapsPatchCreator.create_new_disp_maps()")
self.disp_maps = create_displacement_field_maps(self.global_shape, self.map_count, self.modes, self.gauss_mu_range, self.gauss_sig_scaling)
def get_patch(self):
self.current_patch_index += 1
if len(self.patch_boundingboxes) <= self.current_patch_index:
self.current_patch_index = 0
self.create_new_disp_maps()
patch_boundingbox = self.patch_boundingboxes[self.current_patch_index]
patch_disp_maps = self.disp_maps[:, patch_boundingbox[0]:patch_boundingbox[2], patch_boundingbox[1]:patch_boundingbox[3], :]
return patch_disp_maps
def to_homogeneous(array):
new_array = np.ones((array.shape[0], array.shape[1] + 1), dtype=array.dtype)
new_array[..., :-1] = array
return new_array
def to_euclidian(array_homogeneous):
array = array_homogeneous[:, 0:2] / array_homogeneous[:, 2:3]
return array
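# Editor-added usage sketch (not part of the original repository): converting to
# homogeneous coordinates and back recovers the original points. Values are made up.
def _example_homogeneous_round_trip():
    points = np.array([[1.0, 2.0], [3.0, 4.0]])
    assert np.allclose(to_euclidian(to_homogeneous(points)), points)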
def stretch(array):
mini = np.min(array)
maxi = np.max(array)
if maxi - mini:
array -= mini
array *= 2 / (maxi - mini)
array -= 1
return array
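# Editor-added usage sketch (not part of the original repository): stretch() rescales
# an array in place to the [-1, 1] range. Values are made up.
def _example_stretch():
    values = np.array([0.0, 5.0, 10.0])
    assert np.allclose(stretch(values), np.array([-1.0, 0.0, 1.0]))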
def crop_center(array, out_shape):
assert len(out_shape) == 2, "out_shape should be of length 2"
in_shape = np.array(array.shape[:2])
start = in_shape // 2 - (out_shape // 2)
out_array = array[start[0]:start[0] + out_shape[0], start[1]:start[1] + out_shape[1], ...]
return out_array
def multivariate_gaussian(pos, mu, sigma):
"""Return the multivariate Gaussian distribution on array pos.
pos is an array constructed by packing the meshed arrays of variables
x_1, x_2, x_3, ..., x_k into its _last_ dimension.
"""
n = mu.shape[0]
sigma_det = np.linalg.det(sigma)
sigma_inv = np.linalg.inv(sigma)
N = np.sqrt((2 * np.pi) ** n * sigma_det)
# This einsum call calculates (x-mu)T.sigma-1.(x-mu) in a vectorized
# way across all the input variables.
# print("\tStarting to create multivariate Gaussian")
# start = time.time()
# print((pos - mu).shape)
# print(sigma_inv.shape)
try:
fac = np.einsum('...k,kl,...l->...', pos - mu, sigma_inv, pos - mu, optimize=True)
except:
fac = np.einsum('...k,kl,...l->...', pos - mu, sigma_inv, pos - mu)
# print(fac.shape)
# end = time.time()
# print("\tFinished Gaussian in {}s".format(end - start))
return np.exp(-fac / 2) / N
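# Editor-added usage sketch (not part of the original repository): for a standard 2D
# Gaussian (zero mean, identity covariance), the density at the mean is 1 / (2 * pi).
def _example_multivariate_gaussian():
    value_at_mean = multivariate_gaussian(np.zeros(2), np.zeros(2), np.eye(2))
    assert np.isclose(value_at_mean, 1 / (2 * np.pi))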
def create_multivariate_gaussian_mixture_map(shape, mode_count, mu_range, sig_scaling):
shape = np.array(shape)
# print("Starting to create multivariate Gaussian mixture")
# main_start = time.time()
dim_count = 2
downsample_factor = 4
dtype = np.float32
mu_scale = mu_range[1] - mu_range[0]
    row = np.linspace(mu_range[0], mu_range[1], int(mu_scale * shape[0] / downsample_factor), dtype=dtype)
    col = np.linspace(mu_range[0], mu_range[1], int(mu_scale * shape[1] / downsample_factor), dtype=dtype)
rr, cc = np.meshgrid(row, col, indexing='ij')
grid = np.stack([rr, cc], axis=2)
mus = np.random.uniform(mu_range[0], mu_range[1], (mode_count, dim_count, 2)).astype(dtype)
# gams = np.random.rand(mode_count, dim_count, 2, 2).astype(dtype)
signs = np.random.choice([1, -1], size=(mode_count, dim_count))
# print("\tAdding gaussian mixtures one by one")
# start = time.time()
# if JOBLIB:
# # Parallel computing of multivariate gaussians
# inputs = range(8)
#
# def processInput(i):
# size = 10 * i + 2000
# a = np.random.random_sample((size, size))
# b = np.random.random_sample((size, size))
# n = np.dot(a, b)
# return n
#
# num_cores = multiprocessing.cpu_count()
# print("num_cores: {}".format(num_cores))
# # num_cores = 1
#
# results = Parallel(n_jobs=num_cores)(delayed(processInput)(i) for i in inputs)
# for result in results:
# print(result.shape)
#
# gaussian_mixture = np.zeros_like(grid)
# else:
gaussian_mixture = np.zeros_like(grid)
for mode_index in range(mode_count):
for dim in range(dim_count):
sig = (sig_scaling[1] - sig_scaling[0]) * sklearn.datasets.make_spd_matrix(2) + sig_scaling[0]
# sig = (sig_scaling[1] - sig_scaling[0]) * np.dot(gams[mode_index, dim], np.transpose(gams[mode_index, dim])) + sig_scaling[0]
sig = sig.astype(dtype)
multivariate_gaussian_grid = signs[mode_index, dim] * multivariate_gaussian(grid, mus[mode_index, dim], sig)
gaussian_mixture[:, :, dim] += multivariate_gaussian_grid
# end = time.time()
# print("\tFinished adding gaussian mixtures in {}s".format(end - start))
# squared_gaussian_mixture = np.square(gaussian_mixture)
# magnitude_disp_field_map = np.sqrt(squared_gaussian_mixture[:, :, 0] + squared_gaussian_mixture[:, :, 1])
# max_magnitude = magnitude_disp_field_map.max()
gaussian_mixture[:, :, 0] = stretch(gaussian_mixture[:, :, 0])
gaussian_mixture[:, :, 1] = stretch(gaussian_mixture[:, :, 1])
# Crop
gaussian_mixture = crop_center(gaussian_mixture, shape//downsample_factor)
# plot_field_map(gaussian_mixture)
# Upsample mixture
# gaussian_mixture = skimage.transform.rescale(gaussian_mixture, downsample_factor)
gaussian_mixture = skimage.transform.resize(gaussian_mixture, shape)
main_end = time.time()
# print("Finished multivariate Gaussian mixture in {}s".format(main_end - main_start))
return gaussian_mixture
def create_displacement_field_maps(shape, map_count, modes, gauss_mu_range, gauss_sig_scaling, seed=None):
if seed is not None:
np.random.seed(seed)
disp_field_maps_list = []
for disp_field_map_index in range(map_count):
disp_field_map_normed = create_multivariate_gaussian_mixture_map(shape,
modes,
gauss_mu_range,
gauss_sig_scaling)
disp_field_maps_list.append(disp_field_map_normed)
disp_field_maps = np.stack(disp_field_maps_list, axis=0)
return disp_field_maps
def get_h_mat(t, theta, scale_offset, shear, p):
"""
Computes the homography matrix given the parameters
See https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
(fixed mistake in H_a)
:param t: 2D translation vector
:param theta: Scalar angle
:param scale_offset: 2D scaling vector
:param shear: 2D shearing vector
:param p: 2D projection vector
:return: h_mat: shape (3, 3)
"""
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
h_e = np.array([
[cos_theta, -sin_theta, t[0]],
[sin_theta, cos_theta, t[1]],
[0, 0, 1],
])
h_a = np.array([
[1 + scale_offset[0], shear[1], 0],
[shear[0], 1 + scale_offset[1], 0],
[0, 0, 1],
])
h_p = np.array([
[1, 0, 0],
[0, 1, 0],
[p[0], p[1], 1],
])
h_mat = h_e @ h_a @ h_p
return h_mat
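# Editor-added usage sketch (not part of the original repository): with zero
# translation, rotation, scale offset, shear and projection, the homography reduces
# to the identity matrix.
def _example_get_h_mat_identity():
    h_mat = get_h_mat(np.zeros(2), 0.0, np.zeros(2), np.zeros(2), np.zeros(2))
    assert np.allclose(h_mat, np.eye(3))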
if CV2:
def find_homography_4pt(src, dst):
"""
Estimates the homography that transforms src points into dst points.
Then converts the matrix representation into the 4 points representation.
:param src:
:param dst:
:return:
"""
h_mat, _ = cv2.findHomography(src, dst)
h_4pt = convert_h_mat_to_4pt(h_mat)
return h_4pt
def convert_h_mat_to_4pt(h_mat):
src_4pt = np.array([[
[-1, -1],
[1, -1],
[1, 1],
[-1, 1],
]], dtype=np.float64)
h_4pt = cv2.perspectiveTransform(src_4pt, h_mat)
return h_4pt
def convert_h_4pt_to_mat(h_4pt):
src_4pt = np.array([
[-1, -1],
[1, -1],
[1, 1],
[-1, 1],
], dtype=np.float32)
h_4pt = h_4pt.astype(np.float32)
h_mat = cv2.getPerspectiveTransform(src_4pt, h_4pt)
return h_mat
def field_map_to_image(field_map):
mag, ang = cv2.cartToPolar(field_map[..., 0], field_map[..., 1])
hsv = np.zeros((field_map.shape[0], field_map.shape[1], 3))
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 1] = 255
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
hsv = hsv.astype(np.uint8)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return rgb
else:
def find_homography_4pt(src, dst):
print("cv2 is not available, the find_homography_4pt(src, dst) function cannot work!")
def convert_h_mat_to_4pt(h_mat):
print("cv2 is not available, the convert_h_mat_to_4pt(h_mat) function cannot work!")
def convert_h_4pt_to_mat(h_4pt):
print("cv2 is not available, the convert_h_4pt_to_mat(h_4pt) function cannot work!")
def field_map_to_image(field_map):
print("cv2 is not available, the field_map_to_image(field_map) function cannot work!")
def main():
shape = (220, 220)
mode_count = 30
mu_range = [0, 1]
sig_scaling = [0.0, 0.002]
create_multivariate_gaussian_mixture_map(shape, mode_count, mu_range, sig_scaling)
if __name__ == "__main__":
main()
| 11,176 | 31.873529 | 147 | py |
mapalignment | mapalignment-master/data/mapping_challenge_dataset/read.py | import sys
sys.path.append("../utils")
import visualization
from pycocotools.coco import COCO
from pycocotools import mask as cocomask
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
import pylab
import random
import os
FOLD_LIST = ["train", "val"]
IMAGES_DIRPATH_FORMAT = "{}/images" # var: fold
ANNOTATIONS_FILEPATH_FORMAT = "{}/annotation.json" # var: fold
# ANNOTATIONS_FILEPATH_FORMAT = "{}/annotation-small.json" # var: fold
PIXELSIZE = 0.3 # This is a guess, as that information is unknown
def swap_coords(polygon):
polygon_new = polygon.copy()
polygon_new[..., 0] = polygon[..., 1]
polygon_new[..., 1] = polygon[..., 0]
return polygon_new
class Reader:
def __init__(self, raw_dirpath, fold):
assert fold in FOLD_LIST, "Input fold={} should be in FOLD_LIST={}".format(fold, FOLD_LIST)
self.images_dirpath = os.path.join(raw_dirpath, IMAGES_DIRPATH_FORMAT.format(fold))
self.annotations_filepath = os.path.join(raw_dirpath, ANNOTATIONS_FILEPATH_FORMAT.format(fold))
self.coco = COCO(self.annotations_filepath)
self.category_id_list = self.coco.loadCats(self.coco.getCatIds())
self.image_id_list = self.coco.getImgIds(catIds=self.coco.getCatIds())
def load_image(self, image_id):
img = self.coco.loadImgs(image_id)[0]
image_filepath = os.path.join(self.images_dirpath, img["file_name"])
image = io.imread(image_filepath)
image_metadata = {
"filepath": image_filepath,
"pixelsize": PIXELSIZE
}
return image, image_metadata
def load_polygons(self, image_id):
annotation_ids = self.coco.getAnnIds(imgIds=image_id)
annotation_list = self.coco.loadAnns(annotation_ids)
polygons_coords_list = []
for annotation in annotation_list:
flattened_segmentation_list = annotation["segmentation"]
flattened_arrays = np.array(flattened_segmentation_list)
arrays = np.reshape(flattened_arrays, (flattened_arrays.shape[0], -1, 2))
arrays = swap_coords(arrays)
array_list = []
for array in arrays:
array_list.append(array)
array_list.append(np.array([[np.nan, np.nan]]))
concatenated_array = np.concatenate(array_list, axis=0)
polygons_coords_list.append(concatenated_array)
return polygons_coords_list
def load_gt_data(self, image_id):
# Load image
image_array, image_metadata = self.load_image(image_id)
# Load polygon data
gt_polygons = self.load_polygons(image_id)
# TODO: remove
visualization.save_plot_image_polygons("polygons.png", image_array, [], gt_polygons, [])
# TODO end
return image_array, image_metadata, gt_polygons
def main():
raw_dirpath = "raw"
fold = "train"
reader = Reader(raw_dirpath, fold)
image_id = reader.image_id_list[1]
image_array, image_metadata, gt_polygons = reader.load_gt_data(image_id)
print(image_array.shape)
print(image_metadata)
print(gt_polygons)
if __name__ == "__main__":
main()
| 3,220 | 29.102804 | 103 | py |
mapalignment | mapalignment-master/data/AerialImageDataset/convert_npy_to_shp.py | import os.path
import sys
import read
FILE_DIRNAME = os.getcwd()
sys.path.append(os.path.join(FILE_DIRNAME, "../../projects/utils"))
import geo_utils
RAW_DIRPATH = os.path.join(FILE_DIRNAME, "raw")
IMAGE_INFO_LIST = [
{
"city": "bloomington",
"numbers": list(range(1, 37)),
},
{
"city": "bellingham",
"numbers": list(range(1, 37)),
},
{
"city": "innsbruck",
"numbers": list(range(1, 37)),
},
{
"city": "sfo",
"numbers": list(range(1, 37)),
},
{
"city": "tyrol-e",
"numbers": list(range(1, 37)),
},
{
"city": "austin",
"numbers": list(range(1, 37)),
},
{
"city": "chicago",
"numbers": list(range(1, 37)),
},
{
"city": "kitsap",
"numbers": list(range(1, 37)),
},
{
"city": "tyrol-w",
"numbers": list(range(1, 37)),
},
{
"city": "vienna",
"numbers": list(range(1, 37)),
},
]
POLYGON_DIR_NAME = "aligned_gt_polygons_1"
SHAPEFILE_FILENAME_FORMAT = read.IMAGE_NAME_FORMAT + ".shp" # City name, number
def convert_npy_to_shp(raw_dirpath, polygon_dirname, city, number, shapefile_filename_format):
# Load polygon data
image_filepath = read.get_image_filepath(raw_dirpath, city, number)
polygons = read.load_polygons(raw_dirpath, polygon_dirname, city, number)
if polygons is not None:
output_shapefile_filepath = read.get_polygons_filepath(raw_dirpath, polygon_dirname, city, number, overwrite_polygons_filename_format=shapefile_filename_format)
geo_utils.save_shapefile_from_polygons(polygons, image_filepath, output_shapefile_filepath)
def main():
print("Converting polygons from {}".format(POLYGON_DIR_NAME))
for image_info in IMAGE_INFO_LIST:
for number in image_info["numbers"]:
print("Converting polygons of city {}, number {}".format(image_info["city"], number))
convert_npy_to_shp(RAW_DIRPATH, POLYGON_DIR_NAME, image_info["city"], number, SHAPEFILE_FILENAME_FORMAT)
if __name__ == "__main__":
main()
| 2,189 | 25.071429 | 168 | py |
mapalignment | mapalignment-master/data/AerialImageDataset/fetch_gt_polygons.py | import sys
import os
import numpy as np
sys.path.append("../../../projects/utils")
import python_utils
import polygon_utils
import geo_utils
DIR_PATH_LIST = ["./raw/train", "./raw/test"]
IMAGE_DIR_NAME = "images"
IMAGE_EXTENSION = "tif"
GT_POLYGONS_DIR_NAME = "gt_polygons"
def load_gt_polygons(image_filepath):
gt_polygons = geo_utils.get_polygons_from_osm(image_filepath, tag="building")
if len(gt_polygons):
gt_polygons = polygon_utils.polygons_remove_holes(gt_polygons) # TODO: Remove
# Remove redundant vertices
gt_polygons = polygon_utils.simplify_polygons(gt_polygons, tolerance=1)
return gt_polygons
return None
def fetch_from_images_in_directory(dir_path):
print("Fetching for images in {}".format(dir_path))
gt_polygons_dir_path = os.path.join(dir_path, GT_POLYGONS_DIR_NAME)
if not os.path.exists(gt_polygons_dir_path):
os.makedirs(gt_polygons_dir_path)
images_dir_path = os.path.join(dir_path, IMAGE_DIR_NAME)
image_filepaths = python_utils.get_filepaths(images_dir_path, IMAGE_EXTENSION)
for i, image_filepath in enumerate(image_filepaths):
image_basename = os.path.basename(image_filepath)
image_name = os.path.splitext(image_basename)[0]
print("Fetching for image {}. Progress: {}/{}".format(image_name, i+1, len(image_filepaths)))
gt_polygons_path = os.path.join(gt_polygons_dir_path, "{}.npy".format(image_name))
if not os.path.exists(gt_polygons_path):
gt_polygons = load_gt_polygons(image_filepath)
if gt_polygons is not None:
np.save(gt_polygons_path, gt_polygons)
else:
print("Fetching did not return any polygons. Skip this one.")
else:
print("GT polygons data was already fetched, skip this one. (Delete the gt_polygons file to re-fetch)")
def main():
for dir_path in DIR_PATH_LIST:
fetch_from_images_in_directory(dir_path)
if __name__ == "__main__":
main()
| 2,053 | 29.656716 | 115 | py |
mapalignment | mapalignment-master/data/AerialImageDataset/read.py | import os.path
import csv
import sys
import numpy as np
import skimage.io
CITY_METADATA_DICT = {
"bloomington": {
"fold": "test",
"pixelsize": 0.3,
"numbers": list(range(1, 37)),
},
"bellingham": {
"fold": "test",
"pixelsize": 0.3,
"numbers": list(range(1, 37)),
},
"innsbruck": {
"fold": "test",
"pixelsize": 0.3,
"numbers": list(range(1, 37)),
},
"sfo": {
"fold": "test",
"pixelsize": 0.3,
"numbers": list(range(1, 37)),
},
"tyrol-e": {
"fold": "test",
"pixelsize": 0.3,
"numbers": list(range(1, 37)),
},
"austin": {
"fold": "train",
"pixelsize": 0.3,
"numbers": list(range(1, 37)),
},
"chicago": {
"fold": "train",
"pixelsize": 0.3,
"numbers": list(range(1, 37)),
},
"kitsap": {
"fold": "train",
"pixelsize": 0.3,
"numbers": list(range(1, 37)),
},
"tyrol-w": {
"fold": "train",
"pixelsize": 0.3,
"numbers": list(range(1, 37)),
},
"vienna": {
"fold": "train",
"pixelsize": 0.3,
"numbers": list(range(1, 37)),
},
}
IMAGE_DIR_NAME = "images"
IMAGE_NAME_FORMAT = "{city}{number}"
IMAGE_FILENAME_FORMAT = IMAGE_NAME_FORMAT + ".tif" # City name, number
POLYGON_DIRNAME = "gt_polygons"
POLYGONS_FILENAME_FORMAT = IMAGE_NAME_FORMAT + ".npy" # City name, number
def get_tile_info_list():
tile_info_list = []
for city, info in CITY_METADATA_DICT.items():
for number in info["numbers"]:
image_info = {
"city": city,
"number": number,
}
tile_info_list.append(image_info)
return tile_info_list
def get_image_filepath(raw_dirpath, city, number):
fold = CITY_METADATA_DICT[city]["fold"]
filename = IMAGE_FILENAME_FORMAT.format(city=city, number=number)
filepath = os.path.join(raw_dirpath, fold, IMAGE_DIR_NAME, filename)
return filepath
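# Editor-added usage sketch (not part of the original repository): with the metadata
# above, "austin" belongs to the "train" fold, so its first tile resolves to
# raw/train/images/austin1.tif. The raw_dirpath value is an assumption.
def _example_get_image_filepath():
    filepath = get_image_filepath("raw", "austin", 1)
    assert filepath == os.path.join("raw", "train", "images", "austin1.tif")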
def get_polygons_filepath(raw_dirpath, polygon_dirname, city, number, overwrite_polygons_filename_format=None):
if overwrite_polygons_filename_format is None:
polygons_filename_format = POLYGONS_FILENAME_FORMAT
else:
polygons_filename_format = overwrite_polygons_filename_format
fold = CITY_METADATA_DICT[city]["fold"]
filename = polygons_filename_format.format(city=city, number=number)
filepath = os.path.join(raw_dirpath, fold, polygon_dirname, filename)
return filepath
def load_image(raw_dirpath, city, number):
filepath = get_image_filepath(raw_dirpath, city, number)
image_array = skimage.io.imread(filepath)
# The following is writen this way for future image-specific addition of metadata:
image_metadata = {
"filepath": filepath,
"pixelsize": CITY_METADATA_DICT[city]["pixelsize"]
}
return image_array, image_metadata
def load_polygons(raw_dirpath, polygon_dirname, city, number):
filepath = get_polygons_filepath(raw_dirpath, polygon_dirname, city, number)
try:
gt_polygons = np.load(filepath)
except FileNotFoundError:
print("City {}, number {} does not have gt polygons in directory {}".format(city, number, polygon_dirname))
gt_polygons = None
return gt_polygons
def load_gt_data(raw_dirpath, city, number, overwrite_polygon_dir_name=None):
if overwrite_polygon_dir_name is None:
polygon_dirname = POLYGON_DIRNAME
else:
polygon_dirname = overwrite_polygon_dir_name
# Load image
image_array, image_metadata = load_image(raw_dirpath, city, number)
# Load polygon data
gt_polygons = load_polygons(raw_dirpath, polygon_dirname, city, number)
return image_array, image_metadata, gt_polygons
def main():
raw_dirpath = "raw"
city = "bloomington"
number = 1
image_array, image_metadata, gt_polygons = load_gt_data(raw_dirpath, city, number)
print(image_array.shape)
print(image_metadata)
print(gt_polygons)
if __name__ == "__main__":
main()
| 4,120 | 26.657718 | 115 | py |
mapalignment | mapalignment-master/data/bradbury_buildings_roads_height_dataset/download.py | import os.path
import urllib.request
import zipfile
BASE_URL = 'https://figshare.com/collections/Aerial_imagery_object_identification_dataset_for_building_and_road_detection_and_building_height_estimation/3290519'
FILE_URL_FORMAT = "https://ndownloader.figshare.com/articles/{}/versions/1"
FILE_METADATA_LIST = [
{
"dirname": "Arlington",
"id": "3485204",
},
{
"dirname": "Atlanta",
"id": "3504308",
},
{
"dirname": "Austin",
"id": "3504317",
},
{
"dirname": "DC",
"id": "3504320",
},
{
"dirname": "NewHaven",
"id": "3504323",
},
{
"dirname": "NewYork",
"id": "3504326",
},
{
"dirname": "Norfolk",
"id": "3504347",
},
{
"dirname": "SanFrancisco",
"id": "3504350",
},
{
"dirname": "Seekonk",
"id": "3504359",
},
{
"dirname": "Data_Description",
"id": "3504413",
}
]
DOWNLOAD_DIRPATH = "raw"
if not os.path.exists(DOWNLOAD_DIRPATH):
os.makedirs(DOWNLOAD_DIRPATH)
for file_metadata in FILE_METADATA_LIST:
dirname = file_metadata["dirname"]
id = file_metadata["id"]
download_dirpath = os.path.join(DOWNLOAD_DIRPATH, dirname)
zip_download_dirpath = download_dirpath + ".zip"
if not os.path.exists(download_dirpath):
print("Downloading {}".format(dirname))
urllib.request.urlretrieve(FILE_URL_FORMAT.format(id), zip_download_dirpath)
zip_ref = zipfile.ZipFile(zip_download_dirpath, 'r')
os.makedirs(download_dirpath)
zip_ref.extractall(download_dirpath)
zip_ref.close()
os.remove(zip_download_dirpath)
else:
print("Directory {} already exists so skip download (remove directory if you want to download again)")
| 1,842 | 24.957746 | 161 | py |
mapalignment | mapalignment-master/data/bradbury_buildings_roads_height_dataset/read.py | import os.path
import csv
import numpy as np
import skimage.io
CITY_METADATA_DICT = {
"Arlington": {
"pixelsize": 0.3,
"numbers": [1, 2, 3],
},
"Atlanta": {
"pixelsize": 0.1524,
"numbers": [1, 2, 3],
},
"Austin": {
"pixelsize": 0.1524,
"numbers": [1, 2, 3],
},
"DC": {
"pixelsize": 0.16,
"numbers": [1, 2],
},
"NewHaven": {
"pixelsize": 0.3,
"numbers": [1, 2],
},
"NewYork": {
"pixelsize": 0.1524,
"numbers": [1, 2, 3],
},
"Norfolk": {
"pixelsize": 0.3048,
"numbers": [1, 2, 3],
},
"SanFrancisco": {
"pixelsize": 0.3,
"numbers": [1, 2, 3],
},
"Seekonk": {
"pixelsize": 0.3,
"numbers": [1, 2, 3],
},
}
DIRNAME_FORMAT = "{city}" # City name
IMAGE_NAME_FORMAT = "{city}_{number:02d}"
IMAGE_FILENAME_EXTENSION = ".tif"
POLYGONS_FILENAME_EXTENSION = "_buildingCoord.csv"
def get_tile_info_list():
tile_info_list = []
for city, info in CITY_METADATA_DICT.items():
for number in info["numbers"]:
image_info = {
"city": city,
"number": number,
}
tile_info_list.append(image_info)
return tile_info_list
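# Illustrative example (added, not in the original source): the list built above is a
# flat enumeration of every (city, tile number) pair, e.g.
#   [{"city": "Arlington", "number": 1}, {"city": "Arlington", "number": 2}, ...]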
def get_image_filepath(raw_dirpath, city, number):
dirname = DIRNAME_FORMAT.format(city=city)
image_name = IMAGE_NAME_FORMAT.format(city=city, number=number)
filename = image_name + IMAGE_FILENAME_EXTENSION
filepath = os.path.join(raw_dirpath, dirname, filename)
return filepath
def get_polygons_filepath(raw_dirpath, city, number, polygons_filename_extension):
dirname = DIRNAME_FORMAT.format(city=city)
image_name = IMAGE_NAME_FORMAT.format(city=city, number=number)
filename = image_name + polygons_filename_extension
filepath = os.path.join(raw_dirpath, dirname, filename)
return filepath
def load_image(raw_dirpath, city, number):
filepath = get_image_filepath(raw_dirpath, city, number)
image_array = skimage.io.imread(filepath)
image_array = np.array(image_array, dtype=np.float64) / 255
if image_array.shape[2] == 4:
if city == "SanFrancisco":
# San Francisco needs special treatment because its transparent pixels are white!
alpha = image_array[:, :, 3:4]
image_array = image_array[:, :, :3] * alpha # Apply alpha in 4th channel (IR channel) if present
else:
image_array = image_array[:, :, :3]
image_array = np.round(image_array * 255).astype(np.uint8)
# The following is written this way for future image-specific addition of metadata:
image_metadata = {
"filepath": filepath,
"pixelsize": CITY_METADATA_DICT[city]["pixelsize"]
}
return image_array, image_metadata
def read_csv_row(row):
# print("Polygon: {}".format(row[1]))
coord_list = []
for item in row[3:]:
try:
item_float = float(item)
coord_list.append(item_float)
except ValueError:
pass
coord_array = np.array(coord_list, dtype=np.float64)
coord_array = np.reshape(coord_array, (-1, 2))
# Switch from xy coordinates to ij:
coord_array[:, 0], coord_array[:, 1] = coord_array[:, 1], coord_array[:, 0].copy()
# polygon_utils.plot_polygon(gt_polygon_coords, color=None, draw_labels=False, label_direction=1)
# gt_polygon_coords_no_nans = np.reshape(gt_polygon_coords[~np.isnan(gt_polygon_coords)], (-1, 2))
return coord_array
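# Illustrative example (added, not in the original source), assuming a CSV row whose
# coordinate columns start at index 3:
#   row = ["1", "building_1", "x", "10.0", "20.0", "30.0", "40.0"]
#   read_csv_row(row)
# collects [10.0, 20.0, 30.0, 40.0], reshapes it to [[10.0, 20.0], [30.0, 40.0]]
# ((x, y) pairs) and then swaps the columns, returning [[20.0, 10.0], [40.0, 30.0]]
# in (i, j) order.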
def load_csv(filepath):
polygons_coords_list = []
with open(filepath, 'r') as coords_csv:
csv_reader = csv.reader(coords_csv, delimiter=',')
for row_index, row in enumerate(csv_reader):
if row_index != 0: # Skip header
polygon_coords = read_csv_row(row)
polygons_coords_list.append(polygon_coords)
return polygons_coords_list
def load_polygons_from_npy(filepath):
try:
polygons = np.load(filepath)
except FileNotFoundError:
print("Filepath {} does not exist".format(filepath))
polygons = None
return polygons
def load_polygons(raw_dirpath, city, number, polygons_filename_extension):
filepath = get_polygons_filepath(raw_dirpath, city, number, polygons_filename_extension)
_, file_extension = os.path.splitext(filepath)
if file_extension == ".csv":
return load_csv(filepath)
elif file_extension == ".npy":
return load_polygons_from_npy(filepath)
else:
print("WARNING: file extension {} is not handled by this script. Use .csv or .npy.".format(file_extension))
return None
def load_gt_data(raw_dirpath, city, number, overwrite_polygons_filename_extension=None):
if overwrite_polygons_filename_extension is None:
polygons_filename_extension = POLYGONS_FILENAME_EXTENSION
else:
polygons_filename_extension = overwrite_polygons_filename_extension
# Load image
image_array, image_metadata = load_image(raw_dirpath, city, number)
# Load CSV data
gt_polygons = load_polygons(raw_dirpath, city, number, polygons_filename_extension)
# TODO: remove
# sys.path.append("../utils")
# import visualization
# gt_polygons_filepath = get_polygons_filepath(raw_dirpath, POLYGONS_FILENAME_FORMAT, city, number)
# visualization.save_plot_image_polygons(gt_polygons_filepath + ".polygons.png", image_array, [], gt_polygons, [])
# TODO end
return image_array, image_metadata, gt_polygons
def main():
raw_dirpath = "raw"
city = "Atlanta"
number = 1
image_array, image_metadata, gt_polygons = load_gt_data(raw_dirpath, city, number)
print(image_array.shape)
print(image_metadata)
print(gt_polygons)
if __name__ == "__main__":
main()
| 5,849 | 29.952381 | 118 | py |
cowrie | cowrie-master/setup.py |
from setuptools import setup
try:
import twisted
except ImportError:
raise SystemExit("twisted not found. Make sure you "
"have installed the Twisted core package.")
setup(
packages=["cowrie", "twisted"],
include_package_data=True,
package_dir={"": "src"},
package_data={"": ["*.md"]},
use_incremental=True,
scripts=["bin/fsctl", "bin/asciinema", "bin/cowrie", "bin/createfs", "bin/playlog"],
setup_requires=["incremental", "click"],
)
import sys
def refresh_plugin_cache():
from twisted.plugin import IPlugin, getPlugins
list(getPlugins(IPlugin))
| 642 | 22.814815 | 88 | py |
cowrie | cowrie-master/src/twisted/plugins/cowrie_plugin.py | # All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# notice, this list of conditions and the following disclaimer.
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# products derived from this software without specific prior written
# permission.
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from __future__ import annotations
import os
import sys
from typing import ClassVar
from collections.abc import Callable
from zope.interface import implementer, provider
from incremental import Version
from twisted._version import __version__ as __twisted_version__
from twisted.application import service
from twisted.application.service import IServiceMaker
from twisted.cred import portal
from twisted.internet import reactor
from twisted.logger import ILogObserver, globalLogPublisher
from twisted.plugin import IPlugin
from twisted.python import log, usage
import cowrie.core.checkers
import cowrie.core.realm
import cowrie.ssh.factory
import cowrie.telnet.factory
from backend_pool.pool_server import PoolServerFactory
from cowrie import core
from cowrie._version import __version__ as __cowrie_version__
from cowrie.core.config import CowrieConfig
from cowrie.core.utils import create_endpoint_services, get_endpoints_from_section
from cowrie.pool_interface.handler import PoolHandler
if __twisted_version__ < Version("Twisted", 20, 0, 0):
raise ImportError(
"Your version of Twisted is too old. Please ensure your virtual environment is set up correctly."
)
class Options(usage.Options):
"""
This defines commandline options and flags
"""
# The '-c' parameter is currently ignored
optParameters: list[str] = []
optFlags: list[list[str]] = [["help", "h", "Display this help and exit."]]
@provider(ILogObserver)
def importFailureObserver(event: dict) -> None:
if "failure" in event and event["failure"].type is ImportError:
log.err(
"ERROR: %s. Please run `pip install -U -r requirements.txt` "
"from Cowrie's install directory and virtualenv to install "
"the new dependency" % event["failure"].value.message
)
globalLogPublisher.addObserver(importFailureObserver)
@implementer(IServiceMaker, IPlugin)
class CowrieServiceMaker:
tapname: ClassVar[str] = "cowrie"
description: ClassVar[str] = "She sells sea shells by the sea shore."
options = Options
output_plugins: list[Callable] = []
topService: service.Service
def __init__(self) -> None:
self.pool_handler = None
# ssh is enabled by default
self.enableSSH: bool = CowrieConfig.getboolean("ssh", "enabled", fallback=True)
# telnet is disabled by default
self.enableTelnet: bool = CowrieConfig.getboolean(
"telnet", "enabled", fallback=False
)
# pool is disabled by default, but need to check this setting in case user only wants to run the pool
self.pool_only: bool = CowrieConfig.getboolean(
"backend_pool", "pool_only", fallback=False
)
def makeService(self, options: dict) -> service.Service:
"""
Construct a TCPServer from a factory defined in Cowrie.
"""
if options["help"] is True:
print( # noqa: T201
"""Usage: twistd [options] cowrie [-h]
Options:
-h, --help print this help message.
Makes a Cowrie SSH/Telnet honeypot.
"""
)
sys.exit(1)
if os.name == "posix" and os.getuid() == 0:
print("ERROR: You must not run cowrie as root!") # noqa: T201
sys.exit(1)
tz: str = CowrieConfig.get("honeypot", "timezone", fallback="UTC")
# `system` means use the system time zone
if tz != "system":
os.environ["TZ"] = tz
log.msg("Python Version {}".format(str(sys.version).replace("\n", "")))
log.msg(
"Twisted Version {}.{}.{}".format(
__twisted_version__.major,
__twisted_version__.minor,
__twisted_version__.micro,
)
)
log.msg(
"Cowrie Version {}.{}.{}".format(
__cowrie_version__.major,
__cowrie_version__.minor,
__cowrie_version__.micro,
)
)
# check configurations
if not self.enableTelnet and not self.enableSSH and not self.pool_only:
print( # noqa: T201
"ERROR: You must at least enable SSH or Telnet, or run the backend pool"
)
sys.exit(1)
# Load output modules
self.output_plugins = []
for x in CowrieConfig.sections():
if not x.startswith("output_"):
continue
if CowrieConfig.getboolean(x, "enabled") is False:
continue
engine: str = x.split("_")[1]
try:
output = __import__(
f"cowrie.output.{engine}", globals(), locals(), ["output"]
).Output()
log.addObserver(output.emit)
self.output_plugins.append(output)
log.msg(f"Loaded output engine: {engine}")
except ImportError as e:
log.err(
f"Failed to load output engine: {engine} due to ImportError: {e}"
)
log.msg(
f"Please install the dependencies for {engine} listed in requirements-output.txt"
)
except Exception:
log.err()
log.msg(f"Failed to load output engine: {engine}")
self.topService = service.MultiService()
application = service.Application("cowrie")
self.topService.setServiceParent(application)
# initialise VM pool handling - only if proxy AND pool set to enabled, and pool is to be deployed here
# or also enabled if pool_only is true
backend_type: str = CowrieConfig.get("honeypot", "backend", fallback="shell")
proxy_backend: str = CowrieConfig.get("proxy", "backend", fallback="simple")
if (backend_type == "proxy" and proxy_backend == "pool") or self.pool_only:
# in this case we need to set some kind of pool connection
local_pool: bool = (
CowrieConfig.get("proxy", "pool", fallback="local") == "local"
)
pool_host: str = CowrieConfig.get(
"proxy", "pool_host", fallback="127.0.0.1"
)
pool_port: int = CowrieConfig.getint("proxy", "pool_port", fallback=6415)
if local_pool or self.pool_only:
# start a pool locally
f = PoolServerFactory()
f.tac = self # type: ignore
listen_endpoints = get_endpoints_from_section(
CowrieConfig, "backend_pool", 6415
)
create_endpoint_services(reactor, self.topService, listen_endpoints, f)
pool_host = "127.0.0.1" # force use of local interface
# either way (local or remote) we set up a client to the pool
# unless this instance has no SSH and Telnet (pool only)
if (self.enableTelnet or self.enableSSH) and not self.pool_only:
self.pool_handler = PoolHandler(pool_host, pool_port, self) # type: ignore
else:
# we initialise the services directly
self.pool_ready()
return self.topService
def pool_ready(self) -> None:
backend: str = CowrieConfig.get("honeypot", "backend", fallback="shell")
# this method is never called if self.pool_only is False,
# since we do not start the pool handler that would call it
if self.enableSSH:
factory = cowrie.ssh.factory.CowrieSSHFactory(backend, self.pool_handler)
factory.tac = self # type: ignore
factory.portal = portal.Portal(core.realm.HoneyPotRealm())
factory.portal.registerChecker(core.checkers.HoneypotPublicKeyChecker())
factory.portal.registerChecker(core.checkers.HoneypotPasswordChecker())
if CowrieConfig.getboolean("ssh", "auth_none_enabled", fallback=False):
factory.portal.registerChecker(core.checkers.HoneypotNoneChecker())
if CowrieConfig.has_section("ssh"):
listen_endpoints = get_endpoints_from_section(CowrieConfig, "ssh", 2222)
else:
listen_endpoints = get_endpoints_from_section(
CowrieConfig, "honeypot", 2222
)
create_endpoint_services(
reactor, self.topService, listen_endpoints, factory
)
if self.enableTelnet:
f = cowrie.telnet.factory.HoneyPotTelnetFactory(backend, self.pool_handler)
f.tac = self
f.portal = portal.Portal(core.realm.HoneyPotRealm())
f.portal.registerChecker(core.checkers.HoneypotPasswordChecker())
listen_endpoints = get_endpoints_from_section(CowrieConfig, "telnet", 2223)
create_endpoint_services(reactor, self.topService, listen_endpoints, f)
# Now construct an object which *provides* the relevant interfaces
# The name of this variable is irrelevant, as long as there is *some*
# name bound to a provider of IPlugin and IServiceMaker.
serviceMaker = CowrieServiceMaker()
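# Added note (not in the original source): twistd discovers this provider through the
# twisted.plugins mechanism, so the honeypot is normally started via the bin/cowrie
# wrapper listed in setup.py, which in turn runs "twistd ... cowrie".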
| 10,607 | 38.434944 | 110 | py |
cowrie | cowrie-master/src/cowrie/_version.py | """
Provides cowrie version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update cowrie` to change this file.
from __future__ import annotations
from incremental import Version
__version__ = Version("cowrie", 2, 5, 0)
__all__: list[str] = ["__version__"]
| 303 | 20.714286 | 64 | py |
cowrie | cowrie-master/src/cowrie/output/xmpp.py | from __future__ import annotations
import json
import string
from random import choice
from wokkel import muc
from wokkel.client import XMPPClient
from wokkel.xmppim import AvailablePresence
from twisted.application import service
from twisted.python import log
from twisted.words.protocols.jabber import jid
from twisted.words.protocols.jabber.jid import JID
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class XMPPLoggerProtocol(muc.MUCClient): # type: ignore
def __init__(self, rooms, server, nick):
muc.MUCClient.__init__(self)
self.server = rooms.host
self.jrooms = rooms
self._roomOccupantMap = {}
log.msg(rooms.user)
log.msg(rooms.host)
self.nick = nick
self.last = {}
self.activity = None
def connectionInitialized(self):
"""
The bot has connected to the xmpp server, now try to join the room.
"""
self.join(self.jrooms, self.nick)
def joinedRoom(self, room):
log.msg(f"Joined room {room.name}")
def connectionMade(self):
log.msg("Connected!")
# send initial presence
self.send(AvailablePresence())
def connectionLost(self, reason):
log.msg("Disconnected!")
def onMessage(self, msg):
pass
def receivedGroupChat(self, room, user, body):
pass
def receivedHistory(self, room, user, body, dely, frm=None):
pass
class Output(cowrie.core.output.Output):
"""
xmpp output
"""
def start(self):
server = CowrieConfig.get("output_xmpp", "server")
user = CowrieConfig.get("output_xmpp", "user")
password = CowrieConfig.get("output_xmpp", "password")
muc = CowrieConfig.get("output_xmpp", "muc")
resource = "".join([choice(string.ascii_letters) for i in range(8)])
jid = user + "/" + resource
application = service.Application("honeypot")
self.run(application, jid, password, JID(None, [muc, server, None]), server)
def run(self, application, jidstr, password, muc, server):
self.xmppclient = XMPPClient(JID(jidstr), password)
if CowrieConfig.getboolean("output_xmpp", "debug", fallback=False):
self.xmppclient.logTraffic = True
(user, host, resource) = jid.parse(jidstr)
self.muc = XMPPLoggerProtocol(muc, server, user + "-" + resource)
self.muc.setHandlerParent(self.xmppclient)
self.xmppclient.setServiceParent(application)
self.anonymous = True
self.xmppclient.startService()
def write(self, logentry):
for i in list(logentry.keys()):
# Remove twisted 15 legacy keys
if i.startswith("log_"):
del logentry[i]
elif i == "time":
del logentry[i]
msgJson = json.dumps(logentry, indent=5)
self.muc.groupChat(self.muc.jrooms, msgJson)
def stop(self):
self.xmppclient.stopService()
| 2,986 | 29.479592 | 84 | py |
cowrie | cowrie-master/src/cowrie/output/rethinkdblog.py | from __future__ import annotations
import time
from datetime import datetime
import rethinkdb as r
import cowrie.core.output
from cowrie.core.config import CowrieConfig
def iso8601_to_timestamp(value):
return time.mktime(datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%fZ").timetuple())
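# Added caveat (not in the original source): time.mktime() treats the parsed
# struct_time as local time while the trailing "Z" denotes UTC, so the result is
# only exact on hosts whose timezone is UTC. Example on a UTC host:
#   iso8601_to_timestamp("2020-01-01T00:00:00.000000Z")  # -> 1577836800.0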
RETHINK_DB_SEGMENT = "output_rethinkdblog"
class Output(cowrie.core.output.Output):
# noinspection PyAttributeOutsideInit
def start(self):
self.host = CowrieConfig.get(RETHINK_DB_SEGMENT, "host")
self.port = CowrieConfig.getint(RETHINK_DB_SEGMENT, "port")
self.db = CowrieConfig.get(RETHINK_DB_SEGMENT, "db")
self.table = CowrieConfig.get(RETHINK_DB_SEGMENT, "table")
self.password = CowrieConfig.get(RETHINK_DB_SEGMENT, "password", raw=True)
self.connection = r.connect(
host=self.host, port=self.port, db=self.db, password=self.password
)
try:
r.db_create(self.db).run(self.connection)
r.db(self.db).table_create(self.table).run(self.connection)
except r.RqlRuntimeError:
pass
def stop(self):
self.connection.close()
def write(self, logentry):
for i in list(logentry.keys()):
# remove twisted 15 legacy keys
if i.startswith("log_"):
del logentry[i]
if "timestamp" in logentry:
logentry["timestamp"] = iso8601_to_timestamp(logentry["timestamp"])
r.table(self.table).insert(logentry).run(self.connection)
| 1,526 | 30.163265 | 85 | py |
cowrie | cowrie-master/src/cowrie/output/reversedns.py | from __future__ import annotations
from functools import lru_cache
import ipaddress
from twisted.internet import defer
from twisted.names import client, error
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
Output plugin used for reverse DNS lookup
"""
timeout: list[int] = [3]
def start(self):
"""
Start Output Plugin
"""
self.timeout = [CowrieConfig.getint("output_reversedns", "timeout", fallback=3)]
def stop(self):
"""
Stop Output Plugin
"""
pass
def write(self, entry):
"""
Process log entry
"""
def processConnect(result):
"""
Create log messages for connect events
"""
if result is None:
return
payload = result[0][0].payload
log.msg(
eventid="cowrie.reversedns.connect",
session=entry["session"],
format="reversedns: PTR record for IP %(src_ip)s is %(ptr)s"
" ttl=%(ttl)i",
src_ip=entry["src_ip"],
ptr=str(payload.name),
ttl=payload.ttl,
)
def processForward(result):
"""
Create log messages for forward events
"""
if result is None:
return
payload = result[0][0].payload
log.msg(
eventid="cowrie.reversedns.forward",
session=entry["session"],
format="reversedns: PTR record for IP %(dst_ip)s is %(ptr)s"
" ttl=%(ttl)i",
dst_ip=entry["dst_ip"],
ptr=str(payload.name),
ttl=payload.ttl,
)
def cbError(failure):
if failure.type == defer.TimeoutError:
log.msg("reversedns: Timeout in DNS lookup")
elif failure.type == error.DNSNameError:
# DNSNameError is the NXDOMAIN response
log.msg("reversedns: No PTR record returned")
else:
log.msg("reversedns: Error in DNS lookup")
failure.printTraceback()
if entry["eventid"] == "cowrie.session.connect":
d = self.reversedns(entry["src_ip"])
if d is not None:
d.addCallback(processConnect)
d.addErrback(cbError)
elif entry["eventid"] == "cowrie.direct-tcpip.request":
d = self.reversedns(entry["dst_ip"])
if d is not None:
d.addCallback(processForward)
d.addErrback(cbError)
@lru_cache(maxsize=1000)
def reversedns(self, addr):
"""
Perform a reverse DNS lookup on an IP
Arguments:
addr -- IPv4 Address
"""
try:
ptr = ipaddress.ip_address(addr).reverse_pointer
except ValueError:
return None
d = client.lookupPointer(ptr, timeout=self.timeout)
return d
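# Illustrative note (added, not part of the original plugin): for an IPv4 address
# such as "192.0.2.1", ipaddress.ip_address("192.0.2.1").reverse_pointer yields
# "1.2.0.192.in-addr.arpa", which is the name handed to client.lookupPointer() above.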
| 3,128 | 28.242991 | 88 | py |
cowrie | cowrie-master/src/cowrie/output/mysql.py | """
MySQL output connector. Writes audit logs to MySQL database
"""
from __future__ import annotations
from twisted.enterprise import adbapi
from twisted.internet import defer
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
import mysql.connector
class ReconnectingConnectionPool(adbapi.ConnectionPool):
"""
Reconnecting adbapi connection pool for MySQL.
This class improves on the solution posted at
http://www.gelens.org/2008/09/12/reinitializing-twisted-connectionpool/
by checking exceptions by error code and only disconnecting the current
connection instead of all of them.
CR_CONN_HOST_ERROR: 2003: Cant connect to MySQL server on server (10061)
CR_SERVER_GONE_ERROR: 2006: MySQL server has gone away
CR_SERVER_LOST 2013: Lost connection to MySQL server
ER_LOCK_DEADLOCK 1213: Deadlock found when trying to get lock)
Also see:
http://twistedmatrix.com/pipermail/twisted-python/2009-July/020007.html
"""
def _runInteraction(self, interaction, *args, **kw):
try:
return adbapi.ConnectionPool._runInteraction(self, interaction, *args, **kw)
except mysql.connector.Error as e:
# except (MySQLdb.OperationalError, MySQLdb._exceptions.OperationalError) as e:
if e.errno not in (
mysql.connector.errorcode.CR_CONN_HOST_ERROR,
mysql.connector.errorcode.CR_SERVER_GONE_ERROR,
mysql.connector.errorcode.CR_SERVER_LOST,
mysql.connector.errorcode.ER_LOCK_DEADLOCK,
):
raise e
log.msg(f"output_mysql: got error {e!r}, retrying operation")
conn = self.connections.get(self.threadID())
self.disconnect(conn)
# Try the interaction again
return adbapi.ConnectionPool._runInteraction(self, interaction, *args, **kw)
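# Added note (not in the original source): when one of the listed MySQL error codes is
# raised, only the calling thread's connection is disconnected and the interaction is
# retried once on a fresh connection; any other error is re-raised unchanged.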
class Output(cowrie.core.output.Output):
"""
MySQL output
"""
debug: bool = False
def start(self):
self.debug = CowrieConfig.getboolean("output_mysql", "debug", fallback=False)
port = CowrieConfig.getint("output_mysql", "port", fallback=3306)
try:
self.db = ReconnectingConnectionPool(
"mysql.connector",
host=CowrieConfig.get("output_mysql", "host"),
db=CowrieConfig.get("output_mysql", "database"),
user=CowrieConfig.get("output_mysql", "username"),
passwd=CowrieConfig.get("output_mysql", "password", raw=True),
port=port,
cp_min=1,
cp_max=1,
charset="utf8mb4",
cp_reconnect=True,
use_unicode=True,
)
# except (MySQLdb.Error, MySQLdb._exceptions.Error) as e:
except Exception as e:
log.msg(f"output_mysql: Error {e.args[0]}: {e.args[1]}")
def stop(self):
self.db.close()
def sqlerror(self, error):
"""
1146, "Table '...' doesn't exist"
1406, "Data too long for column '...' at row ..."
"""
if error.value.args[0] in (1146, 1406):
log.msg(f"output_mysql: MySQL Error: {error.value.args!r}")
log.msg(
"output_mysql: MySQL schema maybe misconfigured, doublecheck database!"
)
else:
log.msg(f"output_mysql: MySQL Error: {error.value.args!r}")
def simpleQuery(self, sql, args):
"""
Just run a deferred sql query, only care about errors
"""
if self.debug:
log.msg(f"output_mysql: MySQL query: {sql} {args!r}")
d = self.db.runQuery(sql, args)
d.addErrback(self.sqlerror)
@defer.inlineCallbacks
def write(self, entry):
if entry["eventid"] == "cowrie.session.connect":
if self.debug:
log.msg(
f"output_mysql: SELECT `id` FROM `sensors` WHERE `ip` = '{self.sensor}'"
)
r = yield self.db.runQuery(
f"SELECT `id` FROM `sensors` WHERE `ip` = '{self.sensor}'"
)
if r:
sensorid = r[0][0]
else:
if self.debug:
log.msg(
f"output_mysql: INSERT INTO `sensors` (`ip`) VALUES ('{self.sensor}')"
)
yield self.db.runQuery(
f"INSERT INTO `sensors` (`ip`) VALUES ('{self.sensor}')"
)
r = yield self.db.runQuery("SELECT LAST_INSERT_ID()")
sensorid = int(r[0][0])
self.simpleQuery(
"INSERT INTO `sessions` (`id`, `starttime`, `sensor`, `ip`) "
"VALUES (%s, FROM_UNIXTIME(%s), %s, %s)",
(entry["session"], entry["time"], sensorid, entry["src_ip"]),
)
elif entry["eventid"] == "cowrie.login.success":
self.simpleQuery(
"INSERT INTO `auth` (`session`, `success`, `username`, `password`, `timestamp`) "
"VALUES (%s, %s, %s, %s, FROM_UNIXTIME(%s))",
(
entry["session"],
1,
entry["username"],
entry["password"],
entry["time"],
),
)
elif entry["eventid"] == "cowrie.login.failed":
self.simpleQuery(
"INSERT INTO `auth` (`session`, `success`, `username`, `password`, `timestamp`) "
"VALUES (%s, %s, %s, %s, FROM_UNIXTIME(%s))",
(
entry["session"],
0,
entry["username"],
entry["password"],
entry["time"],
),
)
elif entry["eventid"] == "cowrie.session.params":
self.simpleQuery(
"INSERT INTO `params` (`session`, `arch`) VALUES (%s, %s)",
(entry["session"], entry["arch"]),
)
elif entry["eventid"] == "cowrie.command.input":
self.simpleQuery(
"INSERT INTO `input` (`session`, `timestamp`, `success`, `input`) "
"VALUES (%s, FROM_UNIXTIME(%s), %s , %s)",
(entry["session"], entry["time"], 1, entry["input"]),
)
elif entry["eventid"] == "cowrie.command.failed":
self.simpleQuery(
"INSERT INTO `input` (`session`, `timestamp`, `success`, `input`) "
"VALUES (%s, FROM_UNIXTIME(%s), %s , %s)",
(entry["session"], entry["time"], 0, entry["input"]),
)
elif entry["eventid"] == "cowrie.session.file_download":
self.simpleQuery(
"INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) "
"VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)",
(
entry["session"],
entry["time"],
entry.get("url", ""),
entry["outfile"],
entry["shasum"],
),
)
elif entry["eventid"] == "cowrie.session.file_download.failed":
self.simpleQuery(
"INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) "
"VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)",
(entry["session"], entry["time"], entry.get("url", ""), "NULL", "NULL"),
)
elif entry["eventid"] == "cowrie.session.file_upload":
self.simpleQuery(
"INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) "
"VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)",
(
entry["session"],
entry["time"],
"",
entry["outfile"],
entry["shasum"],
),
)
elif entry["eventid"] == "cowrie.session.input":
self.simpleQuery(
"INSERT INTO `input` (`session`, `timestamp`, `realm`, `input`) "
"VALUES (%s, FROM_UNIXTIME(%s), %s , %s)",
(entry["session"], entry["time"], entry["realm"], entry["input"]),
)
elif entry["eventid"] == "cowrie.client.version":
r = yield self.db.runQuery(
"SELECT `id` FROM `clients` WHERE `version` = %s",
(entry["version"],),
)
if r:
id = int(r[0][0])
else:
yield self.db.runQuery(
"INSERT INTO `clients` (`version`) VALUES (%s)",
(entry["version"],),
)
r = yield self.db.runQuery("SELECT LAST_INSERT_ID()")
id = int(r[0][0])
self.simpleQuery(
"UPDATE `sessions` SET `client` = %s WHERE `id` = %s",
(id, entry["session"]),
)
elif entry["eventid"] == "cowrie.client.size":
self.simpleQuery(
"UPDATE `sessions` SET `termsize` = %s WHERE `id` = %s",
("{}x{}".format(entry["width"], entry["height"]), entry["session"]),
)
elif entry["eventid"] == "cowrie.session.closed":
self.simpleQuery(
"UPDATE `sessions` "
"SET `endtime` = FROM_UNIXTIME(%s) "
"WHERE `id` = %s",
(entry["time"], entry["session"]),
)
elif entry["eventid"] == "cowrie.log.closed":
self.simpleQuery(
"INSERT INTO `ttylog` (`session`, `ttylog`, `size`) "
"VALUES (%s, %s, %s)",
(entry["session"], entry["ttylog"], entry["size"]),
)
elif entry["eventid"] == "cowrie.client.fingerprint":
self.simpleQuery(
"INSERT INTO `keyfingerprints` (`session`, `username`, `fingerprint`) "
"VALUES (%s, %s, %s)",
(entry["session"], entry["username"], entry["fingerprint"]),
)
elif entry["eventid"] == "cowrie.direct-tcpip.request":
self.simpleQuery(
"INSERT INTO `ipforwards` (`session`, `timestamp`, `dst_ip`, `dst_port`) "
"VALUES (%s, FROM_UNIXTIME(%s), %s, %s)",
(entry["session"], entry["time"], entry["dst_ip"], entry["dst_port"]),
)
elif entry["eventid"] == "cowrie.direct-tcpip.data":
self.simpleQuery(
"INSERT INTO `ipforwardsdata` (`session`, `timestamp`, `dst_ip`, `dst_port`, `data`) "
"VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)",
(
entry["session"],
entry["time"],
entry["dst_ip"],
entry["dst_port"],
entry["data"],
),
)
| 11,175 | 37.143345 | 102 | py |
cowrie | cowrie-master/src/cowrie/output/telegram.py | # Simple Telegram Bot logger
import treq
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
telegram output
"""
def start(self):
self.bot_token = CowrieConfig.get("output_telegram", "bot_token")
self.chat_id = CowrieConfig.get("output_telegram", "chat_id")
def stop(self):
pass
def write(self, logentry):
for i in list(logentry.keys()):
# remove twisted 15 legacy keys
if i.startswith("log_"):
del logentry[i]
logon_type = ""
# Prepare logon type
if "HoneyPotSSHTransport" in (logentry["system"].split(","))[0]:
logon_type = "SSH"
elif "CowrieTelnetTransport" in (logentry["system"].split(","))[0]:
logon_type = "Telnet"
# Prepare base message
msgtxt = "<strong>[Cowrie " + logentry["sensor"] + "]</strong>"
msgtxt += "\nEvent: " + logentry["eventid"]
msgtxt += "\nLogon type: " + logon_type
msgtxt += "\nSource: <code>" + logentry["src_ip"] + "</code>"
msgtxt += "\nSession: <code>" + logentry["session"] + "</code>"
if logentry["eventid"] == "cowrie.login.success":
msgtxt += "\nUsername: <code>" + logentry["username"] + "</code>"
msgtxt += "\nPassword: <code>" + logentry["password"] + "</code>"
self.send_message(msgtxt)
elif logentry["eventid"] in ["cowrie.command.failed", "cowrie.command.input"]:
msgtxt += "\nCommand: <pre>" + logentry["input"] + "</pre>"
self.send_message(msgtxt)
elif logentry["eventid"] == "cowrie.session.file_download":
msgtxt += "\nUrl: " + logentry.get("url", "")
self.send_message(msgtxt)
def send_message(self, message):
log.msg("Telegram plugin will try to call TelegramBot")
try:
treq.get(
"https://api.telegram.org/bot" + self.bot_token + "/sendMessage",
params=[
("chat_id", str(self.chat_id)),
("parse_mode", "HTML"),
("text", message),
],
)
except Exception:
log.msg("Telegram plugin request error")
| 2,326 | 34.8 | 86 | py |
cowrie | cowrie-master/src/cowrie/output/influx.py | from __future__ import annotations
import re
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
influx output
"""
def start(self):
host = CowrieConfig.get("output_influx", "host", fallback="")
port = CowrieConfig.getint("output_influx", "port", fallback=8086)
ssl = CowrieConfig.getboolean("output_influx", "ssl", fallback=False)
self.client = None
try:
self.client = InfluxDBClient(host=host, port=port, ssl=ssl, verify_ssl=ssl)
except InfluxDBClientError as e:
log.msg(f"output_influx: I/O error({e.code}): '{e.message}'")
return
if self.client is None:
log.msg("output_influx: cannot instantiate client!")
return
if CowrieConfig.has_option(
"output_influx", "username"
) and CowrieConfig.has_option("output_influx", "password"):
username = CowrieConfig.get("output_influx", "username")
password = CowrieConfig.get("output_influx", "password", raw=True)
self.client.switch_user(username, password)
try:
dbname = CowrieConfig.get("output_influx", "database_name")
except Exception:
dbname = "cowrie"
retention_policy_duration_default = "12w"
retention_policy_name = dbname + "_retention_policy"
if CowrieConfig.has_option("output_influx", "retention_policy_duration"):
retention_policy_duration = CowrieConfig.get(
"output_influx", "retention_policy_duration"
)
match = re.search(r"^\d+[dhmw]{1}$", retention_policy_duration)
if not match:
log.msg(
(
"output_influx: invalid retention policy."
"Using default '{}'.."
).format(retention_policy_duration)
)
retention_policy_duration = retention_policy_duration_default
else:
retention_policy_duration = retention_policy_duration_default
database_list = self.client.get_list_database()
dblist = [str(elem["name"]) for elem in database_list]
if dbname not in dblist:
self.client.create_database(dbname)
self.client.create_retention_policy(
retention_policy_name,
retention_policy_duration,
1,
database=dbname,
default=True,
)
else:
retention_policies_list = self.client.get_list_retention_policies(
database=dbname
)
rplist = [str(elem["name"]) for elem in retention_policies_list]
if retention_policy_name not in rplist:
self.client.create_retention_policy(
retention_policy_name,
retention_policy_duration,
1,
database=dbname,
default=True,
)
else:
self.client.alter_retention_policy(
retention_policy_name,
database=dbname,
duration=retention_policy_duration,
replication=1,
default=True,
)
self.client.switch_database(dbname)
def stop(self):
pass
def write(self, entry):
if self.client is None:
log.msg("output_influx: client object is not instantiated")
return
# event id
eventid = entry["eventid"]
# measurement init
m = {
"measurement": eventid.replace(".", "_"),
"tags": {"session": entry["session"], "src_ip": entry["src_ip"]},
"fields": {"sensor": self.sensor},
}
# event parsing
if eventid in ["cowrie.command.failed", "cowrie.command.input"]:
m["fields"].update(
{
"input": entry["input"],
}
)
elif eventid == "cowrie.session.connect":
m["fields"].update(
{
"protocol": entry["protocol"],
"src_port": entry["src_port"],
"dst_port": entry["dst_port"],
"dst_ip": entry["dst_ip"],
}
)
elif eventid in ["cowrie.login.success", "cowrie.login.failed"]:
m["fields"].update(
{
"username": entry["username"],
"password": entry["password"],
}
)
elif eventid == "cowrie.session.file_download":
m["fields"].update(
{
"shasum": entry.get("shasum"),
"url": entry.get("url"),
"outfile": entry.get("outfile"),
}
)
elif eventid == "cowrie.session.file_download.failed":
m["fields"].update({"url": entry.get("url")})
elif eventid == "cowrie.session.file_upload":
m["fields"].update(
{
"shasum": entry.get("shasum"),
"outfile": entry.get("outfile"),
}
)
elif eventid == "cowrie.session.closed":
m["fields"].update({"duration": entry["duration"]})
elif eventid == "cowrie.client.version":
m["fields"].update(
{
"version": ",".join(entry["version"]),
}
)
elif eventid == "cowrie.client.kex":
m["fields"].update(
{
"maccs": ",".join(entry["macCS"]),
"kexalgs": ",".join(entry["kexAlgs"]),
"keyalgs": ",".join(entry["keyAlgs"]),
"compcs": ",".join(entry["compCS"]),
"enccs": ",".join(entry["encCS"]),
}
)
elif eventid == "cowrie.client.size":
m["fields"].update(
{
"height": entry["height"],
"width": entry["width"],
}
)
elif eventid == "cowrie.client.var":
m["fields"].update(
{
"name": entry["name"],
"value": entry["value"],
}
)
elif eventid == "cowrie.client.fingerprint":
m["fields"].update({"fingerprint": entry["fingerprint"]})
# cowrie.direct-tcpip.data, cowrie.direct-tcpip.request
# cowrie.log.closed
# are not implemented
else:
# other events should be handled
log.msg(f"output_influx: event '{eventid}' not handled. Skipping..")
return
result = self.client.write_points([m])
if not result:
log.msg(
"output_influx: error when writing '{}' measurement"
"in the db.".format(eventid)
)
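# Illustrative example (added, not part of the original plugin): for a
# cowrie.login.success event the point written above looks roughly like
#   {"measurement": "cowrie_login_success",
#    "tags": {"session": "...", "src_ip": "..."},
#    "fields": {"sensor": "...", "username": "...", "password": "..."}}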
| 7,285 | 31.968326 | 87 | py |
cowrie | cowrie-master/src/cowrie/output/virustotal.py | # All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# notice, this list of conditions and the following disclaimer.
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# products derived from this software without specific prior written
# permission.
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""
Send SSH logins to Virustotal
"""
from __future__ import annotations
import datetime
import json
import os
from typing import Any
from urllib.parse import urlencode, urlparse
from zope.interface import implementer
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet.ssl import ClientContextFactory
from twisted.python import log
from twisted.web import client, http_headers
from twisted.web.iweb import IBodyProducer
import cowrie.core.output
from cowrie.core.config import CowrieConfig
COWRIE_USER_AGENT = "Cowrie Honeypot"
VTAPI_URL = "https://www.virustotal.com/vtapi/v2/"
COMMENT = "First seen by #Cowrie SSH/telnet Honeypot http://github.com/cowrie/cowrie"
TIME_SINCE_FIRST_DOWNLOAD = datetime.timedelta(minutes=1)
class Output(cowrie.core.output.Output):
"""
virustotal output
"""
apiKey: str
debug: bool = False
commenttext: str
agent: Any
scan_url: bool
scan_file: bool
url_cache: dict[
str, datetime.datetime
] = {} # url and last time succesfully submitted
def start(self) -> None:
"""
Start output plugin
"""
self.apiKey = CowrieConfig.get("output_virustotal", "api_key")
self.debug = CowrieConfig.getboolean(
"output_virustotal", "debug", fallback=False
)
self.upload = CowrieConfig.getboolean(
"output_virustotal", "upload", fallback=True
)
self.comment = CowrieConfig.getboolean(
"output_virustotal", "comment", fallback=True
)
self.scan_file = CowrieConfig.getboolean(
"output_virustotal", "scan_file", fallback=True
)
self.scan_url = CowrieConfig.getboolean(
"output_virustotal", "scan_url", fallback=False
)
self.commenttext = CowrieConfig.get(
"output_virustotal", "commenttext", fallback=COMMENT
)
self.agent = client.Agent(reactor, WebClientContextFactory())
def stop(self) -> None:
"""
Stop output plugin
"""
def write(self, entry: dict[str, Any]) -> None:
if entry["eventid"] == "cowrie.session.file_download":
if self.scan_url and "url" in entry:
log.msg("Checking url scan report at VT")
self.scanurl(entry)
if self._is_new_shasum(entry["shasum"]) and self.scan_file:
log.msg("Checking file scan report at VT")
self.scanfile(entry)
elif entry["eventid"] == "cowrie.session.file_upload":
if self._is_new_shasum(entry["shasum"]) and self.scan_file:
log.msg("Checking file scan report at VT")
self.scanfile(entry)
def _is_new_shasum(self, shasum):
# Get the downloaded file's modification time
shasumfile = os.path.join(CowrieConfig.get("honeypot", "download_path"), shasum)
file_modification_time = datetime.datetime.fromtimestamp(
os.stat(shasumfile).st_mtime
)
# Assumptions:
# 1. A file that was already downloaded before is not written over the first downloaded copy
# 2. At this stage of the code, the file that needs to be scanned in VT has already been downloaded
#
# Check:
# If the file was first downloaded more than a "period of time" (e.g 1 min) ago -
# it has been apparently scanned before in VT and therefore is not going to be checked again
if file_modification_time < datetime.datetime.now() - TIME_SINCE_FIRST_DOWNLOAD:
log.msg(f"File with shasum '{shasum}' was downloaded before")
return False
return True
def scanfile(self, entry):
"""
Check file scan report for a hash
Argument is full event so we can access full file later on
"""
vtUrl = f"{VTAPI_URL}file/report".encode()
headers = http_headers.Headers({"User-Agent": [COWRIE_USER_AGENT]})
fields = {"apikey": self.apiKey, "resource": entry["shasum"], "allinfo": 1}
body = StringProducer(urlencode(fields).encode("utf-8"))
d = self.agent.request(b"POST", vtUrl, headers, body)
def cbResponse(response):
"""
Main response callback, check HTTP response code
"""
if response.code == 200:
d = client.readBody(response)
d.addCallback(cbBody)
return d
else:
log.msg(f"VT Request failed: {response.code} {response.phrase}")
def cbBody(body):
"""
Received body
"""
return processResult(body)
def cbPartial(failure):
"""
Google HTTP Server does not set Content-Length. Twisted marks it as partial
"""
return processResult(failure.value.response)
def cbError(failure):
log.msg("VT: Error in scanfile")
failure.printTraceback()
def processResult(result):
"""
Extract the information we need from the body
"""
if self.debug:
log.msg(f"VT scanfile result: {result}")
result = result.decode("utf8")
j = json.loads(result)
log.msg("VT: {}".format(j["verbose_msg"]))
if j["response_code"] == 0:
log.msg(
eventid="cowrie.virustotal.scanfile",
format="VT: New file %(sha256)s",
session=entry["session"],
sha256=j["resource"],
is_new="true",
)
try:
b = os.path.basename(urlparse(entry["url"]).path)
if b == "":
fileName = entry["shasum"]
else:
fileName = b
except KeyError:
fileName = entry["shasum"]
if self.upload is True:
return self.postfile(entry["outfile"], fileName)
else:
return
elif j["response_code"] == 1:
log.msg("VT: response=1: this has been scanned before")
# Add detailed report to json log
scans_summary: dict[str, dict[str, str]] = {}
for feed, info in j["scans"].items():
feed_key = feed.lower()
scans_summary[feed_key] = {}
scans_summary[feed_key]["detected"] = str(info["detected"]).lower()
scans_summary[feed_key]["result"] = str(info["result"]).lower()
log.msg(
eventid="cowrie.virustotal.scanfile",
format="VT: Binary file with sha256 %(sha256)s was found malicious "
"by %(positives)s out of %(total)s feeds (scanned on %(scan_date)s)",
session=entry["session"],
positives=j["positives"],
total=j["total"],
scan_date=j["scan_date"],
sha256=j["resource"],
scans=scans_summary,
is_new="false",
)
log.msg("VT: permalink: {}".format(j["permalink"]))
elif j["response_code"] == -2:
log.msg("VT: response=-2: this has been queued for analysis already")
else:
log.msg("VT: unexpected response code: {}".format(j["response_code"]))
d.addCallback(cbResponse)
d.addErrback(cbError)
return d
def postfile(self, artifact, fileName):
"""
Send a file to VirusTotal
"""
vtUrl = f"{VTAPI_URL}file/scan".encode()
fields = {("apikey", self.apiKey)}
files = {("file", fileName, open(artifact, "rb"))}
if self.debug:
log.msg(f"submitting to VT: {files!r}")
contentType, body = encode_multipart_formdata(fields, files)
producer = StringProducer(body)
headers = http_headers.Headers(
{
"User-Agent": [COWRIE_USER_AGENT],
"Accept": ["*/*"],
"Content-Type": [contentType],
}
)
d = self.agent.request(b"POST", vtUrl, headers, producer)
def cbBody(body):
return processResult(body)
def cbPartial(failure):
"""
Google HTTP Server does not set Content-Length. Twisted marks it as partial
"""
return processResult(failure.value.response)
def cbResponse(response):
if response.code == 200:
d = client.readBody(response)
d.addCallback(cbBody)
d.addErrback(cbPartial)
return d
else:
log.msg(f"VT Request failed: {response.code} {response.phrase}")
def cbError(failure):
failure.printTraceback()
def processResult(result):
if self.debug:
log.msg(f"VT postfile result: {result}")
result = result.decode("utf8")
j = json.loads(result)
# This is always a new resource, since we did the scan before
# so always create the comment
log.msg("response=0: posting comment")
if self.comment is True:
return self.postcomment(j["resource"])
else:
return
d.addCallback(cbResponse)
d.addErrback(cbError)
return d
def scanurl(self, entry):
"""
Check url scan report for a hash
"""
if entry["url"] in self.url_cache:
log.msg(
"output_virustotal: url {} was already successfully submitted".format(
entry["url"]
)
)
return
vtUrl = f"{VTAPI_URL}url/report".encode()
headers = http_headers.Headers({"User-Agent": [COWRIE_USER_AGENT]})
fields = {
"apikey": self.apiKey,
"resource": entry["url"],
"scan": 1,
"allinfo": 1,
}
body = StringProducer(urlencode(fields).encode("utf-8"))
d = self.agent.request(b"POST", vtUrl, headers, body)
def cbResponse(response):
"""
Main response callback, checks HTTP response code
"""
if response.code == 200:
d = client.readBody(response)
d.addCallback(cbBody)
return d
else:
log.msg(f"VT Request failed: {response.code} {response.phrase}")
def cbBody(body):
"""
Received body
"""
return processResult(body)
def cbPartial(failure):
"""
Google HTTP Server does not set Content-Length. Twisted marks it as partial
"""
return processResult(failure.value.response)
def cbError(failure):
log.msg("cbError")
failure.printTraceback()
def processResult(result):
"""
Extract the information we need from the body
"""
if self.debug:
log.msg(f"VT scanurl result: {result}")
result = result.decode("utf8")
j = json.loads(result)
log.msg("VT: {}".format(j["verbose_msg"]))
# we got a status=200 assume it was successfully submitted
self.url_cache[entry["url"]] = datetime.datetime.now()
if j["response_code"] == 0:
log.msg(
eventid="cowrie.virustotal.scanurl",
format="VT: New URL %(url)s",
session=entry["session"],
url=entry["url"],
is_new="true",
)
return d
elif j["response_code"] == 1 and "scans" not in j:
log.msg(
"VT: response=1: this was submitted before but has not yet been scanned."
)
elif j["response_code"] == 1 and "scans" in j:
log.msg("VT: response=1: this has been scanned before")
# Add detailed report to json log
scans_summary: dict[str, dict[str, str]] = {}
for feed, info in j["scans"].items():
feed_key = feed.lower()
scans_summary[feed_key] = {}
scans_summary[feed_key]["detected"] = str(info["detected"]).lower()
scans_summary[feed_key]["result"] = str(info["result"]).lower()
log.msg(
eventid="cowrie.virustotal.scanurl",
format="VT: URL %(url)s was found malicious by "
"%(positives)s out of %(total)s feeds (scanned on %(scan_date)s)",
session=entry["session"],
positives=j["positives"],
total=j["total"],
scan_date=j["scan_date"],
url=j["url"],
scans=scans_summary,
is_new="false",
)
log.msg("VT: permalink: {}".format(j["permalink"]))
elif j["response_code"] == -2:
log.msg("VT: response=-2: this has been queued for analysis already")
log.msg("VT: permalink: {}".format(j["permalink"]))
else:
log.msg("VT: unexpected response code: {}".format(j["response_code"]))
d.addCallback(cbResponse)
d.addErrback(cbError)
return d
def postcomment(self, resource):
"""
Send a comment to VirusTotal with Twisted
"""
vtUrl = f"{VTAPI_URL}comments/put".encode()
parameters = {
"resource": resource,
"comment": self.commenttext,
"apikey": self.apiKey,
}
headers = http_headers.Headers({"User-Agent": [COWRIE_USER_AGENT]})
body = StringProducer(urlencode(parameters).encode("utf-8"))
d = self.agent.request(b"POST", vtUrl, headers, body)
def cbBody(body):
return processResult(body)
def cbPartial(failure):
"""
Google HTTP Server does not set Content-Length. Twisted marks it as partial
"""
return processResult(failure.value.response)
def cbResponse(response):
if response.code == 200:
d = client.readBody(response)
d.addCallback(cbBody)
d.addErrback(cbPartial)
return d
else:
log.msg(f"VT Request failed: {response.code} {response.phrase}")
def cbError(failure):
failure.printTraceback()
def processResult(result):
if self.debug:
log.msg(f"VT postcomment result: {result}")
result = result.decode("utf8")
j = json.loads(result)
return j["response_code"]
d.addCallback(cbResponse)
d.addErrback(cbError)
return d
class WebClientContextFactory(ClientContextFactory):
def getContext(self, hostname, port):
return ClientContextFactory.getContext(self)
@implementer(IBodyProducer)
class StringProducer:
def __init__(self, body):
self.body = body
self.length = len(body)
def startProducing(self, consumer):
consumer.write(self.body)
return defer.succeed(None)
def pauseProducing(self):
pass
def resumeProducing(self):
pass
def stopProducing(self):
pass
def encode_multipart_formdata(fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTPS instance
"""
BOUNDARY = b"----------ThIs_Is_tHe_bouNdaRY_$"
L = []
for (key, value) in fields:
L.append(b"--" + BOUNDARY)
L.append(b'Content-Disposition: form-data; name="%s"' % key.encode())
L.append(b"")
L.append(value.encode())
for (key, filename, value) in files:
L.append(b"--" + BOUNDARY)
L.append(
b'Content-Disposition: form-data; name="%s"; filename="%s"'
% (key.encode(), filename.encode())
)
L.append(b"Content-Type: application/octet-stream")
L.append(b"")
L.append(value.read())
L.append(b"--" + BOUNDARY + b"--")
L.append(b"")
body = b"\r\n".join(L)
content_type = b"multipart/form-data; boundary=%s" % BOUNDARY
return content_type, body
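# Usage sketch (added for illustration; the field and file names are made up and not
# part of the original module):
#   fields = {("apikey", "0123456789abcdef")}
#   files = {("file", "sample.bin", open("sample.bin", "rb"))}
#   content_type, body = encode_multipart_formdata(fields, files)
#   # content_type == b'multipart/form-data; boundary=----------ThIs_Is_tHe_bouNdaRY_$'
#   # body is the multipart byte payload to send alongside that Content-Type header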
| 18,353 | 35.416667 | 117 | py |
cowrie | cowrie-master/src/cowrie/output/jsonlog.py | # All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# notice, this list of conditions and the following disclaimer.
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# products derived from this software without specific prior written
# permission.
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from __future__ import annotations
import json
import os
from twisted.python import log
import cowrie.core.output
import cowrie.python.logfile
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
jsonlog output
"""
def start(self):
self.epoch_timestamp = CowrieConfig.getboolean(
"output_jsonlog", "epoch_timestamp", fallback=False
)
fn = CowrieConfig.get("output_jsonlog", "logfile")
dirs = os.path.dirname(fn)
base = os.path.basename(fn)
self.outfile = cowrie.python.logfile.CowrieDailyLogFile(
base, dirs, defaultMode=0o664
)
def stop(self):
self.outfile.flush()
def write(self, logentry):
if self.epoch_timestamp:
logentry["epoch"] = int(logentry["time"] * 1000000 / 1000)
for i in list(logentry.keys()):
# Remove twisted 15 legacy keys
if i.startswith("log_") or i == "time" or i == "system":
del logentry[i]
try:
json.dump(logentry, self.outfile, separators=(",", ":"))
self.outfile.write("\n")
self.outfile.flush()
except TypeError:
log.err("jsonlog: Can't serialize: '" + repr(logentry) + "'")
| 2,770 | 36.958904 | 75 | py |
cowrie | cowrie-master/src/cowrie/output/hpfeeds3.py | """
Output plugin for HPFeeds
"""
from __future__ import annotations
import json
import logging
from hpfeeds.twisted import ClientSessionService
from twisted.internet import endpoints, ssl
from twisted.internet import reactor
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
Output plugin for HPFeeds
"""
channel = "cowrie.sessions"
def start(self):
if CowrieConfig.has_option("output_hpfeeds3", "channel"):
self.channel = CowrieConfig.get("output_hpfeeds3", "channel")
if CowrieConfig.has_option("output_hpfeeds3", "endpoint"):
endpoint = CowrieConfig.get("output_hpfeeds3", "endpoint")
else:
server = CowrieConfig.get("output_hpfeeds3", "server")
port = CowrieConfig.getint("output_hpfeeds3", "port")
if CowrieConfig.has_option("output_hpfeeds3", "tlscert"):
with open(CowrieConfig.get("output_hpfeeds3", "tlscert")) as fp:
authority = ssl.Certificate.loadPEM(fp.read())
options = ssl.optionsForClientTLS(server, authority)
endpoint = endpoints.SSL4ClientEndpoint(reactor, server, port, options)
else:
endpoint = endpoints.HostnameEndpoint(reactor, server, port)
ident = CowrieConfig.get("output_hpfeeds3", "identifier")
secret = CowrieConfig.get("output_hpfeeds3", "secret")
self.meta = {}
self.client = ClientSessionService(endpoint, ident, secret)
self.client.startService()
def stop(self):
self.client.stopService()
def write(self, entry):
session = entry["session"]
if entry["eventid"] == "cowrie.session.connect":
self.meta[session] = {
"session": session,
"startTime": entry["timestamp"],
"endTime": "",
"peerIP": entry["src_ip"],
"peerPort": entry["src_port"],
"hostIP": entry["dst_ip"],
"hostPort": entry["dst_port"],
"loggedin": None,
"credentials": [],
"commands": [],
"unknownCommands": [],
"urls": [],
"version": None,
"ttylog": None,
"hashes": set(),
"protocol": entry["protocol"],
}
elif entry["eventid"] == "cowrie.login.success":
u, p = entry["username"], entry["password"]
self.meta[session]["loggedin"] = (u, p)
elif entry["eventid"] == "cowrie.login.failed":
u, p = entry["username"], entry["password"]
self.meta[session]["credentials"].append((u, p))
elif entry["eventid"] == "cowrie.command.input":
c = entry["input"]
self.meta[session]["commands"].append(c)
elif entry["eventid"] == "cowrie.command.failed":
uc = entry["input"]
self.meta[session]["unknownCommands"].append(uc)
elif entry["eventid"] == "cowrie.session.file_download":
if "url" in entry:
url = entry["url"]
self.meta[session]["urls"].append(url)
self.meta[session]["hashes"].add(entry["shasum"])
elif entry["eventid"] == "cowrie.session.file_upload":
self.meta[session]["hashes"].add(entry["shasum"])
elif entry["eventid"] == "cowrie.client.version":
v = entry["version"]
self.meta[session]["version"] = v
elif entry["eventid"] == "cowrie.log.closed":
# entry["ttylog"]
with open(entry["ttylog"], "rb") as ttylog:
self.meta[session]["ttylog"] = ttylog.read().hex()
elif entry["eventid"] == "cowrie.session.closed":
meta = self.meta.pop(session, None)
if meta:
log.msg("publishing metadata to hpfeeds", logLevel=logging.DEBUG)
meta["endTime"] = entry["timestamp"]
meta["hashes"] = list(meta["hashes"])
self.client.publish(self.channel, json.dumps(meta).encode("utf-8"))
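# Added note (not in the original source): the JSON published above is the per-session
# "meta" dict assembled in write(), with keys such as "session", "peerIP", "commands",
# "hashes" (converted to a list) and "ttylog" (hex-encoded), emitted on the configured
# hpfeeds channel once the session closes.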
| 4,221 | 34.478992 | 87 | py |
cowrie | cowrie-master/src/cowrie/output/csirtg.py | from __future__ import annotations
import os
import sys
from datetime import datetime
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
token = CowrieConfig.get("output_csirtg", "token", fallback="a1b2c3d4")
if token == "a1b2c3d4":
log.msg("output_csirtg: token not found in configuration file")
sys.exit(1)
os.environ["CSIRTG_TOKEN"] = token
import csirtgsdk # noqa: E402
class Output(cowrie.core.output.Output):
"""
CSIRTG output
"""
def start(self):
"""
Start the output module.
Note that csirtgsdk is imported at module level only after Cowrie sets the
CSIRTG_TOKEN environment variable, because the library reads it on import.
"""
self.user = CowrieConfig.get("output_csirtg", "username")
self.feed = CowrieConfig.get("output_csirtg", "feed")
self.debug = CowrieConfig.getboolean("output_csirtg", "debug", fallback=False)
self.description = CowrieConfig.get("output_csirtg", "description")
self.context = {}
# self.client = csirtgsdk.client.Client()
def stop(self):
pass
def write(self, e):
"""
Only pass on connection events
"""
if e["eventid"] == "cowrie.session.connect":
self.submitIp(e)
def submitIp(self, e):
peerIP = e["src_ip"]
ts = e["timestamp"]
system = e.get("system", None)
if system not in [
"cowrie.ssh.factory.CowrieSSHFactory",
"cowrie.telnet.transport.HoneyPotTelnetFactory",
]:
return
today = str(datetime.now().date())
if not self.context.get(today):
self.context = {}
self.context[today] = set()
key = ",".join([peerIP, system])
if key in self.context[today]:
return
self.context[today].add(key)
tags = "scanner,ssh"
port = 22
if e["system"] == "cowrie.telnet.transport.HoneyPotTelnetFactory":
tags = "scanner,telnet"
port = 23
i = {
"user": self.user,
"feed": self.feed,
"indicator": peerIP,
"portlist": port,
"protocol": "tcp",
"tags": tags,
"firsttime": ts,
"lasttime": ts,
"description": self.description,
}
if self.debug is True:
log.msg(f"output_csirtg: Submitting {i!r} to CSIRTG")
ind = csirtgsdk.indicator.Indicator(i).submit()
if self.debug is True:
log.msg(f"output_csirtg: Submitted {ind!r} to CSIRTG")
log.msg("output_csirtg: submitted to csirtg at {} ".format(ind["location"]))
| 2,714 | 26.15 | 86 | py |
cowrie | cowrie-master/src/cowrie/output/mongodb.py | from __future__ import annotations
import pymongo
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
mongodb output
"""
def insert_one(self, collection, event):
try:
object_id = collection.insert_one(event).inserted_id
return object_id
except Exception as e:
log.msg(f"mongo error - {e}")
def update_one(self, collection, session, doc):
try:
object_id = collection.update_one({"session": session}, {"$set": doc})
return object_id
except Exception as e:
log.msg(f"mongo error - {e}")
def start(self):
db_addr = CowrieConfig.get("output_mongodb", "connection_string")
db_name = CowrieConfig.get("output_mongodb", "database")
try:
self.mongo_client = pymongo.MongoClient(db_addr)
self.mongo_db = self.mongo_client[db_name]
# Define Collections.
self.col_sensors = self.mongo_db["sensors"]
self.col_sessions = self.mongo_db["sessions"]
self.col_auth = self.mongo_db["auth"]
self.col_input = self.mongo_db["input"]
self.col_downloads = self.mongo_db["downloads"]
self.col_clients = self.mongo_db["clients"]
self.col_ttylog = self.mongo_db["ttylog"]
self.col_keyfingerprints = self.mongo_db["keyfingerprints"]
self.col_event = self.mongo_db["event"]
self.col_ipforwards = self.mongo_db["ipforwards"]
self.col_ipforwardsdata = self.mongo_db["ipforwardsdata"]
except Exception as e:
log.msg(f"output_mongodb: Error: {e!s}")
def stop(self):
self.mongo_client.close()
def write(self, entry):
for i in list(entry.keys()):
# Remove twisted 15 legacy keys
if i.startswith("log_"):
del entry[i]
eventid = entry["eventid"]
if eventid == "cowrie.session.connect":
# Check if sensor exists, else add it.
doc = self.col_sensors.find_one({"sensor": self.sensor})
if not doc:
self.insert_one(self.col_sensors, entry)
# Prep extra elements just to make django happy later on
entry["starttime"] = entry["timestamp"]
entry["endtime"] = None
entry["sshversion"] = None
entry["termsize"] = None
log.msg("Session Created")
self.insert_one(self.col_sessions, entry)
elif eventid in ["cowrie.login.success", "cowrie.login.failed"]:
self.insert_one(self.col_auth, entry)
elif eventid in ["cowrie.command.input", "cowrie.command.failed"]:
self.insert_one(self.col_input, entry)
elif eventid == "cowrie.session.file_download":
# ToDo add a config section and offer to store the file in the db - useful for central logging
# we will add an option to set max size, if its 16mb or less we can store as normal,
# If over 16 either fail or we just use gridfs both are simple enough.
self.insert_one(self.col_downloads, entry)
elif eventid == "cowrie.client.version":
doc = self.col_sessions.find_one({"session": entry["session"]})
if doc:
doc["sshversion"] = entry["version"]
self.update_one(self.col_sessions, entry["session"], doc)
else:
pass
elif eventid == "cowrie.client.size":
doc = self.col_sessions.find_one({"session": entry["session"]})
if doc:
doc["termsize"] = "{}x{}".format(entry["width"], entry["height"])
self.update_one(self.col_sessions, entry["session"], doc)
else:
pass
elif eventid == "cowrie.session.closed":
doc = self.col_sessions.find_one({"session": entry["session"]})
if doc:
doc["endtime"] = entry["timestamp"]
self.update_one(self.col_sessions, entry["session"], doc)
else:
pass
elif eventid == "cowrie.log.closed":
# ToDo: compress to optimise the space, especially if you're sending to a remote db
with open(entry["ttylog"]) as ttylog:
entry["ttylogpath"] = entry["ttylog"]
entry["ttylog"] = ttylog.read().encode().hex()
self.insert_one(self.col_ttylog, entry)
elif eventid == "cowrie.client.fingerprint":
self.insert_one(self.col_keyfingerprints, entry)
elif eventid == "cowrie.direct-tcpip.request":
self.insert_one(self.col_ipforwards, entry)
elif eventid == "cowrie.direct-tcpip.data":
self.insert_one(self.col_ipforwardsdata, entry)
# Catch any other event types
else:
self.insert_one(self.col_event, entry)
| 5,057 | 37.318182 | 106 | py |
cowrie | cowrie-master/src/cowrie/output/discord.py | """
Simple Discord webhook logger
"""
from __future__ import annotations
import json
from io import BytesIO
from twisted.internet import reactor
from twisted.internet.ssl import ClientContextFactory
from twisted.web import client, http_headers
from twisted.web.client import FileBodyProducer
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
def start(self) -> None:
self.url = CowrieConfig.get("output_discord", "url").encode("utf8")
contextFactory = WebClientContextFactory()
self.agent = client.Agent(reactor, contextFactory)
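# Illustrative cowrie.cfg section; "url" is the only option this plugin reads,
# and the webhook URL below is a placeholder, not a real endpoint.
#
#   [output_discord]
#   url = https://discord.com/api/webhooks/<id>/<token>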
def stop(self) -> None:
pass
def write(self, logentry):
webhook_message = "__New logentry__\n"
for i in list(logentry.keys()):
# Remove twisted 15 legacy keys
if i.startswith("log_"):
del logentry[i]
else:
webhook_message += f"{i}: `{logentry[i]}`\n"
self.postentry({"content": webhook_message})
def postentry(self, entry):
headers = http_headers.Headers(
{
b"Content-Type": [b"application/json"],
}
)
body = FileBodyProducer(BytesIO(json.dumps(entry).encode("utf8")))
self.agent.request(b"POST", self.url, headers, body)
class WebClientContextFactory(ClientContextFactory):
def getContext(self, hostname, port):
return ClientContextFactory.getContext(self)
| 1,486 | 26.537037 | 75 | py |
cowrie | cowrie-master/src/cowrie/output/graylog.py | """
Simple Graylog HTTP Graylog Extended Log Format (GELF) logger.
"""
from __future__ import annotations
import json
import time
from io import BytesIO
from twisted.internet import reactor
from twisted.internet.ssl import ClientContextFactory
from twisted.web import client, http_headers
from twisted.web.client import FileBodyProducer
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
def start(self) -> None:
self.url = CowrieConfig.get("output_graylog", "url").encode("utf8")
contextFactory = WebClientContextFactory()
self.agent = client.Agent(reactor, contextFactory)
def stop(self) -> None:
pass
def write(self, logentry):
for i in list(logentry.keys()):
# Remove twisted 15 legacy keys
if i.startswith("log_"):
del logentry[i]
gelf_message = {
"version": "1.1",
"host": logentry["sensor"],
"timestamp": time.time(),
"short_message": json.dumps(logentry),
"level": 1,
}
self.postentry(gelf_message)
def postentry(self, entry):
headers = http_headers.Headers(
{
b"Content-Type": [b"application/json"],
}
)
body = FileBodyProducer(BytesIO(json.dumps(entry).encode("utf8")))
self.agent.request(b"POST", self.url, headers, body)
class WebClientContextFactory(ClientContextFactory):
def getContext(self, hostname, port):
return ClientContextFactory.getContext(self)
| 1,607 | 26.254237 | 75 | py |
cowrie | cowrie-master/src/cowrie/output/abuseipdb.py | # Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# all copies or substantial portions of the Software.
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
Cowrie plugin for reporting login attempts via the AbuseIPDB API.
"AbuseIPDB is a project dedicated to helping combat the spread of hackers,
spammers, and abusive activity on the internet." <https://www.abuseipdb.com/>
"""
from __future__ import annotations
__author__ = "Benjamin Stephens"
__version__ = "0.3b3"
import pickle
from collections import deque
from datetime import datetime
from json.decoder import JSONDecodeError
from pathlib import Path
from time import sleep, time
from treq import post
from twisted.internet import defer, threads
from twisted.internet import reactor
from twisted.python import log
from twisted.web import http
from cowrie.core import output
from cowrie.core.config import CowrieConfig
# How often we clean and dump and our lists/dict...
CLEAN_DUMP_SCHED = 600
# ...and the file we dump to.
DUMP_FILE: str = "aipdb.dump"
ABUSEIP_URL = "https://api.abuseipdb.com/api/v2/report"
# AbuseIPDB will just 429 us if we report an IP too often; currently 15 minutes
# (900 seconds); set a lower limit here to protect against bad user input.
REREPORT_MINIMUM = 900
class Output(output.Output):
def start(self):
self.tolerance_attempts: int = CowrieConfig.getint(
"output_abuseipdb", "tolerance_attempts", fallback=10
)
self.state_path = Path(CowrieConfig.get("output_abuseipdb", "dump_path"))
self.state_dump = self.state_path / DUMP_FILE
self.logbook = LogBook(self.tolerance_attempts, self.state_dump)
# Pass our instance of LogBook() to Reporter() so we don't end up
# working with different records.
self.reporter = Reporter(self.logbook, self.tolerance_attempts)
# We store the LogBook state any time a shutdown occurs. The rest of
# our start-up is just for loading and cleaning the previous state
try:
with open(self.state_dump, "rb") as f:
self.logbook.update(pickle.load(f))
# Check to see if we're still asleep after receiving a Retry-After
# header in a previous response
if self.logbook["sleeping"]:
t_wake: float = self.logbook["sleep_until"]
t_now: float = time()
if t_wake > t_now:
# If we're meant to be asleep, we'll set logbook.sleep to
# true and logbook.sleep_until to the time we can wake-up
self.logbook.sleeping = True
self.logbook.sleep_until = t_wake
# and we set an alarm so the reactor knows when he can drag
# us back out of bed
reactor.callLater(t_wake - t_now, self.logbook.wakeup)
del self.logbook["sleeping"]
del self.logbook["sleep_until"]
tolerated = self.logbook.pop("tolerated")
except (pickle.UnpicklingError, FileNotFoundError, KeyError):
if self.state_path.exists():
pass
else:
# If we don't already have an abuseipdb directory, let's make
# one with the necessary permissions now.
Path(self.state_path).mkdir(mode=0o700, parents=False, exist_ok=False)
# And we do a clean-up to make sure that we're not carrying any expired
# entries. The clean-up task ends by calling itself in a callLater,
# thus running every CLEAN_DUMP_SCHED seconds until the end of time.
self.logbook.cleanup_and_dump_state()
# If tolerance_attempts > the previous setting, we need to change the
# maximum length of the deque for any previously seen IP that we're
# loading, otherwise we'd potentially have IPs that may never trigger
# a report
try:
if tolerated != self.tolerance_attempts:
for k in self.logbook:
if self.logbook[k].__class__() == deque():
self.logbook[k] = deque(
[*self.logbook[k]], maxlen=self.tolerance_attempts
)
except UnboundLocalError:
pass
log.msg(
eventid="cowrie.abuseipdb.started",
format=f"AbuseIPDB Plugin version {__version__} started. Currently in beta.",
)
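# Illustrative cowrie.cfg section for this plugin. The option names are those
# read above and in Reporter(); the values are placeholders. tolerance_window
# is in minutes, rereport_after in hours, and the logbook state is pickled to
# <dump_path>/aipdb.dump between restarts.
#
#   [output_abuseipdb]
#   api_key = 0123456789abcdef
#   tolerance_attempts = 10
#   tolerance_window = 120
#   rereport_after = 24
#   dump_path = /path/to/cowrie/var/lib/abuseipdb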
def stop(self):
self.logbook.cleanup_and_dump_state(mode=1)
def write(self, ev):
if self.logbook.sleeping:
return
if ev["eventid"].rsplit(".", 1)[0] == "cowrie.login":
# If tolerance_attempts was set to 1 or 0, we don't need to
# keep logs so our handling of the event is different than if > 1
if self.tolerance_attempts <= 1:
self.intolerant_observer(ev["src_ip"], time(), ev["username"])
else:
self.tolerant_observer(ev["src_ip"], time())
def intolerant_observer(self, ip, t, uname):
# Checks if already reported; if yes, checks if we can rereport yet.
# The entry for a reported IP is a tuple (None, time_reported). If IP
# is not already in logbook, reports it immediately
if ip in self.logbook:
if self.logbook.can_rereport(ip, t):
self.reporter.report_ip_single(ip, t, uname)
else:
return
else:
self.reporter.report_ip_single(ip, t, uname)
def tolerant_observer(self, ip, t):
# Appends the time an IP was seen to its list in the logbook. Once the
# length of the list equals tolerance_attempts, the IP is reported.
if ip in self.logbook:
try:
if self.logbook[ip][0]:
# Evaluates true if IP not already reported. If reported,
# logbook entry is of the form (None, time_reported).
self.logbook[ip].append(t)
self.logbook.clean_expired_timestamps(ip, t)
if len(self.logbook[ip]) >= self.tolerance_attempts:
self.reporter.report_ip_multiple(ip)
elif self.logbook.can_rereport(ip, t):
# Check if reported IP is ready for re-reporting
self.logbook[ip] = deque([t], maxlen=self.tolerance_attempts)
else:
return
except IndexError:
# If IP address was in logbook but had no entries then we're
# fine to re-report.
self.logbook[ip].append(t)
else:
self.logbook[ip] = deque([t], maxlen=self.tolerance_attempts)
class LogBook(dict):
"""
Dictionary class with methods for cleaning and dumping its state.
This class should be treated as global state. For the moment this is
achieved simply by passing the instance created by Output() directly to
Reporter(). Sharing is caring.
"""
def __init__(self, tolerance_attempts, state_dump):
self.sleeping = False
self.sleep_until: float = 0.0
self.tolerance_attempts = tolerance_attempts
self.tolerance_window: int = 60 * CowrieConfig.getint(
"output_abuseipdb", "tolerance_window", fallback=120
)
self.rereport_after: float = 3600 * CowrieConfig.getfloat(
"output_abuseipdb", "rereport_after", fallback=24
)
if self.rereport_after < REREPORT_MINIMUM:
self.rereport_after = REREPORT_MINIMUM
self.state_dump = state_dump
# To write our dump to disk we have a method we call in a thread so we
# don't block if we get slow io. This is a cheap hack to get a lock on
# the file. See self.write_dump_file()
self._writing = False
super().__init__()
def wakeup(self):
# This is the method we pass in a callLater() before we go to sleep.
self.sleeping = False
self.sleep_until = 0
self.recall = reactor.callLater(CLEAN_DUMP_SCHED, self.cleanup_and_dump_state)
log.msg(
eventid="cowrie.abuseipdb.wakeup",
format="AbuseIPDB plugin resuming activity after receiving "
"Retry-After header in previous response.",
)
def clean_expired_timestamps(self, ip_key, current_time):
# Performs popleft() if leftmost timestamp has expired. Continues doing
# so until either; 1) a timestamp within our reporting window is
# reached, or; 2) the list is empty.
while self[ip_key]:
if not self[ip_key][0]:
break
elif self[ip_key][0] < current_time - self.tolerance_window:
self[ip_key].popleft()
else:
break
def find_and_delete_empty_entries(self):
# Search and destroy method. Iterates over dict, appends k to delete_me
# where v is an empty list.
delete_me = []
for k in self:
if not self[k]:
delete_me.append(k)
self.delete_entries(delete_me)
def delete_entries(self, delete_me):
for i in delete_me:
del self[i]
def can_rereport(self, ip_key, current_time):
# Checks if an IP in the logbook that has already been reported is
# ready to be re-reported again.
try:
if current_time > self[ip_key][1] + self.rereport_after:
return True
elif self[ip_key][0] and self.tolerance_attempts <= 1:
# If we were previously running with a tolerance_attempts > 1
# and have been restarted with tolerance_attempts <= 1,
# we could still be carrying some logs which would evaluate as
# false in our first test. Reported IPs will still evaluate
# false here.
return True
else:
return False
except IndexError:
return True
def cleanup_and_dump_state(self, mode=0):
# Runs a full clean-up of logbook. Re-calls itself in CLEAN_DUMP_SCHED
# seconds. MODES: 0) Normal looping task, and; 1) Sleep/Stop mode;
# cancels any scheduled callLater() and doesn't recall itself.
if mode == 1:
try:
self.recall.cancel()
except AttributeError:
pass
if self.sleeping:
t = self.sleep_until
else:
t = time()
delete_me = []
for k in self:
if self.can_rereport(k, t):
delete_me.append(k)
self.clean_expired_timestamps(k, t)
self.delete_entries(delete_me)
self.find_and_delete_empty_entries()
self.dump_state()
if mode == 0 and not self.sleeping:
self.recall = reactor.callLater(
CLEAN_DUMP_SCHED, self.cleanup_and_dump_state
)
def dump_state(self):
dump = {
"sleeping": self.sleeping,
"sleep_until": self.sleep_until,
# Store current tolerance_attempts for comparison on next start
"tolerated": self.tolerance_attempts,
}
for k, v in self.items():
dump[k] = v
reactor.callInThread(self.write_dump_file, dump)
def write_dump_file(self, dump):
# Check self._writing; waits for release; timeout after 10 seconds.
i = 0
while self._writing:
sleep(1)
i += 1
if i >= 10:
return
# Acquire 'lock'
self._writing = True
with open(self.state_dump, "wb") as f:
pickle.dump(dump, f, protocol=pickle.HIGHEST_PROTOCOL)
# Release 'lock'
self._writing = False
class Reporter:
"""
HTTP client and methods for preparing report parameters.
"""
def __init__(self, logbook, attempts):
self.logbook = logbook
self.attempts = attempts
self.headers = {
"User-Agent": "Cowrie Honeypot AbuseIPDB plugin",
"Accept": "application/json",
"Key": CowrieConfig.get("output_abuseipdb", "api_key"),
}
def report_ip_single(self, ip, t, uname):
self.logbook[ip] = (None, t)
t = self.epoch_to_string_utc(t)
params = {
"ip": ip,
"categories": "18,22",
"comment": "Cowrie Honeypot: Unauthorised SSH/Telnet login attempt "
'with user "{}" at {}'.format(uname, t),
}
self.http_request(params)
def report_ip_multiple(self, ip):
t_last = self.logbook[ip].pop()
t_first = self.epoch_to_string_utc(self.logbook[ip].popleft())
self.logbook[ip] = (None, t_last)
t_last = self.epoch_to_string_utc(t_last)
params = {
"ip": ip,
"categories": "18,22",
"comment": "Cowrie Honeypot: {} unauthorised SSH/Telnet login attempts "
"between {} and {}".format(self.attempts, t_first, t_last),
}
self.http_request(params)
@staticmethod
def epoch_to_string_utc(t):
t_utc = datetime.utcfromtimestamp(t)
return t_utc.strftime("%Y-%m-%dT%H:%M:%SZ")
@staticmethod
def log_response_failed(ip, response, reason):
log.msg(
eventid="cowrie.abuseipdb.reportfail",
format="AbuseIPDB plugin failed to report IP %(IP)s. Received HTTP "
"status code %(response)s in response. Reason: %(reason)s.",
IP=ip,
response=response,
reason=reason,
)
@defer.inlineCallbacks
def http_request(self, params):
try:
response = yield post(
url=ABUSEIP_URL,
headers=self.headers,
params=params,
)
except Exception as e:
log.msg(
eventid="cowrie.abuseipdb.reportfail",
format="AbuseIPDB plugin failed to report IP %(IP)s. "
"Exception raised: %(exception)s.",
IP=params["ip"],
exception=repr(e),
)
return
if response.code != http.OK:
if response.code == 429:
return self.rate_limit_handler(params, response)
try:
reason = http.RESPONSES[response.code].decode("utf-8")
except Exception:
reason = "Unable to determine."
self.log_response_failed(params["ip"], response.code, reason)
return
j = yield response.json()
log.msg(
eventid="cowrie.abuseipdb.reportedip",
format="AbuseIPDB plugin successfully reported %(IP)s. Current "
"AbuseIPDB confidence score for this IP is %(confidence)s",
IP=params["ip"],
confidence=j["data"]["abuseConfidenceScore"],
)
@defer.inlineCallbacks
def rate_limit_handler(self, params, response):
try:
j = yield response.json()
reason = j["errors"][0]["detail"]
except (KeyError, JSONDecodeError):
reason = "No other information provided or unexpected response"
self.log_response_failed(params["ip"], response.code, reason)
# AbuseIPDB will respond with a 429 and a Retry-After in its response
# headers if we've exceeded our limits for the day. Here we test for
# that header and, if it exists, put ourselves to sleep.
retry_after = yield response.headers.hasHeader("Retry-After")
if retry_after:
retry = yield response.headers.getRawHeaders("Retry-After")
retry = int(retry.pop())
if retry > 86340:
yield threads.deferToThread(self.sleeper_thread)
log.msg(
eventid="cowrie.abuseipdb.ratelimited",
format="AbuseIPDB plugin received Retry-After header > 86340 "
"seconds in previous response. Possible delayed quota "
"reset on AbuseIPDB servers; retrying request now.",
)
return self.http_request(params)
self.logbook.sleeping = True
self.logbook.sleep_until = time() + retry
reactor.callLater(retry, self.logbook.wakeup)
# It's not serious if we don't, but it's best to call the clean-up
# after logbook.sleeping has been set to True. The clean-up method
# checks for this flag and will use the wake-up time rather than
# the current time when sleep is set. mode=1 ensures we'll cancel
# any already scheduled calls to clean-up and don't schedule
# another one until the wake-up method calls it again.
self.logbook.cleanup_and_dump_state(mode=1)
log.msg(
eventid="cowrie.abuseipdb.ratelimited",
format="AbuseIPDB plugin received Retry-After header in "
"response. Reporting activity will resume in "
"%(retry_after)s seconds at %(wake_at)s",
retry_after=retry,
wake_at=self.epoch_to_string_utc(self.logbook.sleep_until),
)
def sleeper_thread(self):
# Cheap retry wait hack. Call in thread so as not to block.
sleep(10)
| 18,525 | 36.275654 | 89 | py |
cowrie | cowrie-master/src/cowrie/output/datadog.py | """
Simple Datadog HTTP logger.
"""
from __future__ import annotations
import json
import platform
from io import BytesIO
from twisted.internet import reactor
from twisted.internet.ssl import ClientContextFactory
from twisted.python import log
from twisted.web import client, http_headers
from twisted.web.client import FileBodyProducer
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
def start(self) -> None:
self.url = CowrieConfig.get("output_datadog", "url").encode("utf8")
self.api_key = CowrieConfig.get(
"output_datadog", "api_key", fallback=""
).encode("utf8")
if len(self.api_key) == 0:
log.msg("Datadog output module: API key is not defined.")
self.ddsource = CowrieConfig.get(
"output_datadog", "ddsource", fallback="cowrie"
)
self.ddtags = CowrieConfig.get("output_datadog", "ddtags", fallback="env:dev")
self.service = CowrieConfig.get(
"output_datadog", "service", fallback="honeypot"
)
contextFactory = WebClientContextFactory()
self.agent = client.Agent(reactor, contextFactory)
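# Illustrative cowrie.cfg section; option names follow the reads in start(),
# and the values (including the intake URL) are placeholders to adapt to your
# own Datadog setup.
#
#   [output_datadog]
#   url = https://http-intake.logs.datadoghq.com/api/v2/logs
#   api_key = 0123456789abcdef
#   ddsource = cowrie
#   ddtags = env:dev
#   service = honeypot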
def stop(self) -> None:
pass
def write(self, logentry):
for i in list(logentry.keys()):
# Remove twisted 15 legacy keys
if i.startswith("log_"):
del logentry[i]
message = [
{
"ddsource": self.ddsource,
"ddtags": self.ddtags,
"hostname": platform.node(),
"message": json.dumps(logentry),
"service": self.service,
}
]
self.postentry(message)
def postentry(self, entry):
base_headers = {
b"Accept": [b"application/json"],
b"Content-Type": [b"application/json"],
b"DD-API-KEY": [self.api_key],
}
headers = http_headers.Headers(base_headers)
body = FileBodyProducer(BytesIO(json.dumps(entry).encode("utf8")))
self.agent.request(b"POST", self.url, headers, body)
class WebClientContextFactory(ClientContextFactory):
def getContext(self, hostname, port):
return ClientContextFactory.getContext(self)
| 2,286 | 30.763889 | 86 | py |
cowrie | cowrie-master/src/cowrie/output/crashreporter.py | """
Cowrie Crashreport
This output plugin is not like the others.
It has its own emit() function and does not use cowrie eventid's
to avoid circular calls
"""
from __future__ import annotations
import json
import treq
from twisted.internet import defer
from twisted.logger._levels import LogLevel
from twisted.python import log
import cowrie.core.output
from cowrie._version import __version__
from cowrie.core.config import CowrieConfig
COWRIE_USER_AGENT = f"Cowrie Honeypot {__version__}".encode("ascii")
COWRIE_URL = "https://api.cowrie.org/v1/crash"
class Output(cowrie.core.output.Output):
"""
Cowrie Crashreporter output
"""
def start(self):
"""
Start output plugin
"""
self.apiKey = CowrieConfig.get("output_cowrie", "api_key", fallback=None)
self.debug = CowrieConfig.getboolean("output_cowrie", "debug", fallback=False)
def emit(self, event):
"""
Note we override emit() here, unlike other plugins.
"""
if event.get("log_level") == LogLevel.critical:
self.crashreport(event)
def stop(self):
"""
Stop output plugin
"""
pass
def write(self, entry):
"""
events are done in emit() not in write()
"""
pass
@defer.inlineCallbacks
def crashreport(self, entry):
"""
Crash report
"""
try:
r = yield treq.post(
COWRIE_URL,
json.dumps(
{"log_text": entry.get("log_text"), "system": entry.get("system")}
).encode("ascii"),
headers={
b"Content-Type": [b"application/json"],
b"User-Agent": [COWRIE_USER_AGENT],
},
)
content = yield r.text()
if self.debug:
log.msg("crashreport: " + content)
except Exception as e:
log.msg("crashreporter failed" + repr(e))
| 2,005 | 24.392405 | 86 | py |
cowrie | cowrie-master/src/cowrie/output/elasticsearch.py | # Simple elasticsearch logger
from __future__ import annotations
from typing import Any
from elasticsearch import Elasticsearch, NotFoundError
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
elasticsearch output
"""
index: str
pipeline: str
es: Any
def start(self):
host = CowrieConfig.get("output_elasticsearch", "host")
port = CowrieConfig.get("output_elasticsearch", "port")
self.index = CowrieConfig.get("output_elasticsearch", "index")
self.type = CowrieConfig.get("output_elasticsearch", "type")
self.pipeline = CowrieConfig.get("output_elasticsearch", "pipeline")
# new options (creds + https)
username = CowrieConfig.get("output_elasticsearch", "username", fallback=None)
password = CowrieConfig.get("output_elasticsearch", "password", fallback=None)
use_ssl = CowrieConfig.getboolean("output_elasticsearch", "ssl", fallback=False)
ca_certs = CowrieConfig.get("output_elasticsearch", "ca_certs", fallback=None)
verify_certs = CowrieConfig.getboolean(
"output_elasticsearch", "verify_certs", fallback=True
)
options: dict[str, Any] = {}
# authentication and TLS options
if (username is not None) and (password is not None):
options["http_auth"] = (username, password)
if use_ssl:
options["scheme"] = "https"
options["use_ssl"] = use_ssl
options["ssl_show_warn"] = False
options["verify_certs"] = verify_certs
if verify_certs:
options["ca_certs"] = ca_certs
# connect
self.es = Elasticsearch(f"{host}:{port}", **options)
# self.es = Elasticsearch('{0}:{1}'.format(self.host, self.port))
self.check_index()
# ensure geoip pipeline is well set up
if self.pipeline == "geoip":
# create a new feature if it does not exist yet
self.check_geoip_mapping()
# ensure the geoip pipeline is setup
self.check_geoip_pipeline()
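# Illustrative cowrie.cfg section covering the options read above; the values
# are placeholders. username/password, ssl, ca_certs and verify_certs are only
# needed for a secured cluster; pipeline = geoip enables the GeoIP mapping and
# ingest pipeline set up below.
#
#   [output_elasticsearch]
#   host = localhost
#   port = 9200
#   index = cowrie
#   type = _doc
#   pipeline = geoip
#   username = elastic
#   password = changeme
#   ssl = true
#   verify_certs = true
#   ca_certs = /path/to/ca.pem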
def check_index(self):
"""
This function check whether the index exists.
"""
if not self.es.indices.exists(index=self.index):
# create index
self.es.indices.create(index=self.index)
def check_geoip_mapping(self):
"""
This function ensures that the right mapping is set up
to convert source ip (src_ip) into geo data.
"""
if self.es.indices.exists(index=self.index):
# Add mapping (to add geo field -> for geoip)
# The new feature is named 'geo'.
# You can put mappings several times, if it exists the
# PUT requests will be ignored.
self.es.indices.put_mapping(
index=self.index,
body={
"properties": {
"geo": {"properties": {"location": {"type": "geo_point"}}}
}
},
)
def check_geoip_pipeline(self):
"""
This function aims to set at least a geoip pipeline
to map IP to geo locations
"""
try:
# check if the geoip pipeline exists. An error
# is raised if the pipeline does not exist
self.es.ingest.get_pipeline(id=self.pipeline)
except NotFoundError:
# geoip pipeline
body = {
"description": "Add geoip info",
"processors": [
{
"geoip": {
"field": "src_ip", # input field of the pipeline (source address)
"target_field": "geo", # output field of the pipeline (geo data)
"database_file": "GeoLite2-City.mmdb",
}
}
],
}
self.es.ingest.put_pipeline(id=self.pipeline, body=body)
def stop(self):
pass
def write(self, logentry):
for i in list(logentry.keys()):
# remove twisted 15 legacy keys
if i.startswith("log_"):
del logentry[i]
self.es.index(
index=self.index, doc_type=self.type, body=logentry, pipeline=self.pipeline
)
| 4,393 | 33.873016 | 94 | py |
cowrie | cowrie-master/src/cowrie/output/localsyslog.py | # All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# notice, this list of conditions and the following disclaimer.
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# products derived from this software without specific prior written
# permission.
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from __future__ import annotations
import syslog
import twisted.python.syslog
import cowrie.core.cef
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
localsyslog output
"""
def start(self):
self.format = CowrieConfig.get("output_localsyslog", "format")
facilityString = CowrieConfig.get("output_localsyslog", "facility")
self.facility = vars(syslog)["LOG_" + facilityString]
self.syslog = twisted.python.syslog.SyslogObserver(
prefix="cowrie", facility=self.facility
)
def stop(self):
pass
def write(self, logentry):
if "isError" not in logentry:
logentry["isError"] = False
if self.format == "cef":
self.syslog.emit(
{
"message": [cowrie.core.cef.formatCef(logentry)],
"isError": False,
"system": "cowrie",
}
)
else:
# message appears with additional spaces if message key is defined
logentry["message"] = [logentry["message"]]
self.syslog.emit(logentry)
| 2,683 | 36.277778 | 78 | py |
cowrie | cowrie-master/src/cowrie/output/threatjammer.py |
"""
Cowrie plugin for reporting login attempts via the ThreatJammer.com Report API.
"ThreatJammer.com is a tool to track and detect attacks" <https://threatjammer.com>
"""
__author__ = "Diego Parrilla Santamaria"
__version__ = "0.1.0"
import datetime
from typing import Optional
from collections.abc import Generator
from treq import post
from twisted.internet import defer
from twisted.python import log
from twisted.web import http
from cowrie.core import output
from cowrie.core.config import CowrieConfig
# Buffer flush frequency (in minutes)
BUFFER_FLUSH_FREQUENCY: int = 1
# Buffer flush max size
BUFFER_FLUSH_MAX_SIZE: int = 1000
# API URL
THREATJAMMER_REPORT_URL: str = "https://dublin.report.threatjammer.com/v1/ip"
THREATJAMMER_DEFAULT_TTL: int = 86400
# Default category to store the ip address.
THREATJAMMER_DEFAULT_CATEGORY: str = "ABUSE"
# Track the login event
THREATJAMMER_DEFAULT_TRACK_LOGIN: bool = True
# Track the session event
THREATJAMMER_DEFAULT_TRACK_SESSION: bool = False
# Default tags to store the ip address.
THREATJAMMER_DEFAULT_TAGS: str = "COWRIE"
class HTTPClient:
"""
HTTP client to report the IP address set
"""
def __init__(self, api_url: str, bearer_token: str):
self.headers = {
"User-Agent": "Cowrie Honeypot ThreatJammer.com output plugin",
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": f"Bearer {bearer_token}",
}
self.api_url = api_url
def report(
self,
ip_set: set[str],
category: str,
ttl: int = 0,
tags: Optional[list[str]] = None,
) -> None:
payload: dict = {
"addresses": list(ip_set),
"type": category,
"ttl": ttl,
"tags": tags,
}
self._post(payload)
@defer.inlineCallbacks
def _post(self, payload: dict) -> Generator:
try:
response = yield post(
url=self.api_url,
headers=self.headers,
json=payload,
)
except Exception as e:
log.msg(
eventid="cowrie.threatjammer.reportfail",
format="ThreatJammer.com output plugin failed when reporting the payload %(payload)s. "
"Exception raised: %(exception)s.",
payload=str(payload),
exception=repr(e),
)
return
if response.code != http.ACCEPTED:
reason = yield response.text()
log.msg(
eventid="cowrie.threatjammer.reportfail",
format="ThreatJammer.com output plugin failed to report the payload %(payload)s. Returned the\
HTTP status code %(response)s. Reason: %(reason)s.",
payload=str(payload),
response=response.code,
reason=reason,
)
else:
log.msg(
eventid="cowrie.threatjammer.reportedipset",
format="ThreatJammer.com output plugin successfully reported %(payload)s.",
payload=str(payload),
)
return
class Output(output.Output):
def start(self):
self.api_url = CowrieConfig.get(
"output_threatjammer",
"api_url",
fallback=THREATJAMMER_REPORT_URL,
)
self.default_ttl = CowrieConfig.getint(
"output_threatjammer", "ttl", fallback=THREATJAMMER_DEFAULT_TTL
)
self.default_category = CowrieConfig.get(
"output_threatjammer",
"category",
fallback=THREATJAMMER_DEFAULT_CATEGORY,
)
self.track_login = CowrieConfig.getboolean(
"output_threatjammer",
"track_login",
fallback=THREATJAMMER_DEFAULT_TRACK_LOGIN,
)
self.track_session = CowrieConfig.getboolean(
"output_threatjammer",
"track_session",
fallback=THREATJAMMER_DEFAULT_TRACK_SESSION,
)
self.bearer_token = CowrieConfig.get("output_threatjammer", "bearer_token")
self.tags = CowrieConfig.get("output_threatjammer", "tags").split(",")
self.last_report: int = -1
self.report_bucket: int = BUFFER_FLUSH_MAX_SIZE
self.ip_set: set[str] = set()
self.track_events = []
if self.track_login:
self.track_events.append("cowrie.login")
if self.track_session:
self.track_events.append("cowrie.session")
self.http_client = HTTPClient(self.api_url, self.bearer_token)
log.msg(
eventid="cowrie.threatjammer.reporterinitialized",
format="ThreatJammer.com output plugin successfully initialized.\
Category=%(category)s. TTL=%(ttl)s. Session Tracking=%(session_tracking)s. Login Tracking=%(login_tracking)s",
category=self.default_category,
ttl=self.default_ttl,
session_tracking=self.track_session,
login_tracking=self.track_login,
)
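# Illustrative cowrie.cfg section; option names match the reads above, values
# are placeholders. ttl is in seconds, tags is a comma-separated list, and
# api_url already defaults to the public report endpoint used above.
#
#   [output_threatjammer]
#   bearer_token = 0123456789abcdef
#   api_url = https://dublin.report.threatjammer.com/v1/ip
#   ttl = 86400
#   category = ABUSE
#   track_login = true
#   track_session = false
#   tags = COWRIE,SSH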
def stop(self):
log.msg(
eventid="cowrie.threatjammer.reporterterminated",
format="ThreatJammer.com output plugin successfully terminated. Bye!",
)
def write(self, ev):
if ev["eventid"].rsplit(".", 1)[0] in self.track_events:
source_ip: str = ev["src_ip"]
self.ip_set.add(source_ip)
if self.last_report == -1:
# No flush cycle in progress yet; store the timestamp of the first element.
self.last_report = int(datetime.datetime.utcnow().timestamp())
self.report_bucket -= 1
if (
self.report_bucket == 0
or (int(datetime.datetime.utcnow().timestamp()) - self.last_report)
> BUFFER_FLUSH_FREQUENCY * 60
):
# Flush the ip_set once BUFFER_FLUSH_MAX_SIZE IPs are collected or more than
# BUFFER_FLUSH_FREQUENCY minutes have passed since the last flush
self.http_client.report(
ip_set=self.ip_set,
category=self.default_category,
ttl=self.default_ttl,
tags=self.tags,
)
self.ip_set = set()
self.report_bucket = BUFFER_FLUSH_MAX_SIZE
self.last_report = -1
| 7,010 | 32.545455 | 111 | py |
cowrie | cowrie-master/src/cowrie/output/dshield.py | """
Send SSH logins to SANS DShield.
See https://isc.sans.edu/ssh.html
"""
from __future__ import annotations
import base64
import hashlib
import hmac
import re
import time
import dateutil.parser
import requests
from twisted.internet import reactor
from twisted.internet import threads
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
dshield output
"""
debug: bool = False
userid: str
batch_size: int
batch: list
def start(self):
self.auth_key = CowrieConfig.get("output_dshield", "auth_key")
self.userid = CowrieConfig.get("output_dshield", "userid")
self.batch_size = CowrieConfig.getint("output_dshield", "batch_size")
self.debug = CowrieConfig.getboolean("output_dshield", "debug", fallback=False)
self.batch = [] # This is used to store login attempts in batches
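# Illustrative cowrie.cfg section; the option names are the ones read in
# start(), the values are placeholders. batch_size controls how many login
# attempts are buffered before a single PUT to secure.dshield.org.
#
#   [output_dshield]
#   userid = 0123456789
#   auth_key = 0123456789abcdef
#   batch_size = 100
#   debug = false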
def stop(self):
pass
def write(self, entry):
if (
entry["eventid"] == "cowrie.login.success"
or entry["eventid"] == "cowrie.login.failed"
):
date = dateutil.parser.parse(entry["timestamp"])
self.batch.append(
{
"date": str(date.date()),
"time": date.time().strftime("%H:%M:%S"),
"timezone": time.strftime("%z"),
"source_ip": entry["src_ip"],
"user": entry["username"],
"password": entry["password"],
}
)
if len(self.batch) >= self.batch_size:
batch_to_send = self.batch
self.submit_entries(batch_to_send)
self.batch = []
def transmission_error(self, batch):
self.batch.extend(batch)
if len(self.batch) > self.batch_size * 2:
self.batch = self.batch[-self.batch_size :]
def submit_entries(self, batch):
"""
Large parts of this method are adapted from kippo-pyshield by jkakavas
Many thanks to their efforts. https://github.com/jkakavas/kippo-pyshield
"""
# The nonce is predefined as explained in the original script :
# trying to avoid sending the authentication key in the "clear" but
# not wanting to deal with a full digest like exchange. Using a
# fixed nonce to mix up the limited userid.
_nonceb64 = "ElWO1arph+Jifqme6eXD8Uj+QTAmijAWxX1msbJzXDM="
log_output = ""
for attempt in self.batch:
log_output += "{}\t{}\t{}\t{}\t{}\t{}\n".format(
attempt["date"],
attempt["time"],
attempt["timezone"],
attempt["source_ip"],
attempt["user"],
attempt["password"],
)
nonce = base64.b64decode(_nonceb64)
digest = base64.b64encode(
hmac.new(
nonce + self.userid.encode("ascii"),
base64.b64decode(self.auth_key),
hashlib.sha256,
).digest()
)
auth_header = "credentials={} nonce={} userid={}".format(
digest.decode("ascii"), _nonceb64, self.userid
)
headers = {"X-ISC-Authorization": auth_header, "Content-Type": "text/plain"}
if self.debug:
log.msg(f"dshield: posting: {headers!r}")
log.msg(f"dshield: posting: {log_output}")
req = threads.deferToThread(
requests.request,
method="PUT",
url="https://secure.dshield.org/api/file/sshlog",
headers=headers,
timeout=10,
data=log_output,
)
def check_response(resp):
failed = False
response = resp.content.decode("utf8")
if self.debug:
log.msg(f"dshield: status code {resp.status_code}")
log.msg(f"dshield: response {resp.content}")
if resp.ok:
sha1_regex = re.compile(r"<sha1checksum>([^<]+)<\/sha1checksum>")
sha1_match = sha1_regex.search(response)
sha1_local = hashlib.sha1()
sha1_local.update(log_output.encode("utf8"))
if sha1_match is None:
log.msg(
"dshield: ERROR: Could not find sha1checksum in response: {}".format(
repr(response)
)
)
failed = True
elif sha1_match.group(1) != sha1_local.hexdigest():
log.msg(
"dshield: ERROR: SHA1 Mismatch {} {} .".format(
sha1_match.group(1), sha1_local.hexdigest()
)
)
failed = True
md5_regex = re.compile(r"<md5checksum>([^<]+)<\/md5checksum>")
md5_match = md5_regex.search(response)
md5_local = hashlib.md5()
md5_local.update(log_output.encode("utf8"))
if md5_match is None:
log.msg("dshield: ERROR: Could not find md5checksum in response")
failed = True
elif md5_match.group(1) != md5_local.hexdigest():
log.msg(
"dshield: ERROR: MD5 Mismatch {} {} .".format(
md5_match.group(1), md5_local.hexdigest()
)
)
failed = True
log.msg(
f"dshield: SUCCESS: Sent {log_output} bytes worth of data to secure.dshield.org"
)
else:
log.msg(f"dshield ERROR: error {resp.status_code}.")
log.msg(f"dshield response was {response}")
failed = True
if failed:
# Something went wrong, we need to add them to batch.
reactor.callFromThread(self.transmission_error, batch)
req.addCallback(check_response)
| 6,123 | 33.994286 | 100 | py |
cowrie | cowrie-master/src/cowrie/output/splunk.py |
"""
Splunk HTTP Event Collector (HEC) Connector.
Not ready for production use.
JSON log file is still the recommended way to go
"""
from __future__ import annotations
import json
from io import BytesIO
from typing import Any
from twisted.internet import reactor
from twisted.internet.ssl import ClientContextFactory
from twisted.python import log
from twisted.web import client, http_headers
from twisted.web.client import FileBodyProducer
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
Splunk HEC output
"""
token: str
agent: Any
url: bytes
def start(self) -> None:
self.token = CowrieConfig.get("output_splunk", "token")
self.url = CowrieConfig.get("output_splunk", "url").encode("utf8")
self.index = CowrieConfig.get("output_splunk", "index", fallback=None)
self.source = CowrieConfig.get("output_splunk", "source", fallback=None)
self.sourcetype = CowrieConfig.get("output_splunk", "sourcetype", fallback=None)
self.host = CowrieConfig.get("output_splunk", "host", fallback=None)
contextFactory = WebClientContextFactory()
# contextFactory.method = TLSv1_METHOD
self.agent = client.Agent(reactor, contextFactory)
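# Illustrative cowrie.cfg section; token and url are required, the remaining
# options are optional overrides. All values below are placeholders.
#
#   [output_splunk]
#   url = https://splunk.example.org:8088/services/collector/event
#   token = 0123456789abcdef
#   index = cowrie
#   sourcetype = cowrie
#   source = cowrie
#   host = honeypot-01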
def stop(self) -> None:
pass
def write(self, logentry):
for i in list(logentry.keys()):
# Remove twisted 15 legacy keys
if i.startswith("log_"):
del logentry[i]
splunkentry = {}
if self.index:
splunkentry["index"] = self.index
if self.source:
splunkentry["source"] = self.source
if self.sourcetype:
splunkentry["sourcetype"] = self.sourcetype
if self.host:
splunkentry["host"] = self.host
else:
splunkentry["host"] = logentry["sensor"]
splunkentry["event"] = logentry
self.postentry(splunkentry)
def postentry(self, entry):
"""
Send a JSON log entry to Splunk with Twisted
"""
headers = http_headers.Headers(
{
b"User-Agent": [b"Cowrie SSH Honeypot"],
b"Authorization": [b"Splunk " + self.token.encode("utf8")],
b"Content-Type": [b"application/json"],
}
)
body = FileBodyProducer(BytesIO(json.dumps(entry).encode("utf8")))
d = self.agent.request(b"POST", self.url, headers, body)
def cbBody(body):
return processResult(body)
def cbPartial(failure):
"""
The server may not set Content-Length; Twisted then marks the response as partial
"""
failure.printTraceback()
return processResult(failure.value.response)
def cbResponse(response):
if response.code == 200:
return
else:
log.msg(f"SplunkHEC response: {response.code} {response.phrase}")
d = client.readBody(response)
d.addCallback(cbBody)
d.addErrback(cbPartial)
return d
def cbError(failure):
failure.printTraceback()
def processResult(result):
j = json.loads(result)
log.msg("SplunkHEC response: {}".format(j["text"]))
d.addCallback(cbResponse)
d.addErrback(cbError)
return d
class WebClientContextFactory(ClientContextFactory):
def getContext(self, hostname, port):
return ClientContextFactory.getContext(self)
| 3,647 | 30.179487 | 88 | py |
cowrie | cowrie-master/src/cowrie/output/sqlite.py | from __future__ import annotations
import sqlite3
from typing import Any
from twisted.enterprise import adbapi
from twisted.internet import defer
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
sqlite output
"""
db: Any
def start(self):
"""
Start sqlite3 logging module using Twisted ConnectionPool.
Need to be started with check_same_thread=False. See
https://twistedmatrix.com/trac/ticket/3629.
"""
sqliteFilename = CowrieConfig.get("output_sqlite", "db_file")
try:
self.db = adbapi.ConnectionPool(
"sqlite3", database=sqliteFilename, check_same_thread=False
)
except sqlite3.OperationalError as e:
log.msg(e)
self.db.start()
def stop(self):
"""
Close connection to db
"""
self.db.close()
def sqlerror(self, error):
log.err("sqlite error")
error.printTraceback()
def simpleQuery(self, sql, args):
"""
Just run a deferred sql query, only care about errors
"""
d = self.db.runQuery(sql, args)
d.addErrback(self.sqlerror)
@defer.inlineCallbacks
def write(self, entry):
if entry["eventid"] == "cowrie.session.connect":
r = yield self.db.runQuery(
"SELECT `id` FROM `sensors` " "WHERE `ip` = ?", (self.sensor,)
)
if r and r[0][0]:
sensorid = r[0][0]
else:
yield self.db.runQuery(
"INSERT INTO `sensors` (`ip`) " "VALUES (?)", (self.sensor,)
)
r = yield self.db.runQuery("SELECT LAST_INSERT_ROWID()")
sensorid = int(r[0][0])
self.simpleQuery(
"INSERT INTO `sessions` (`id`, `starttime`, `sensor`, `ip`) "
"VALUES (?, ?, ?, ?)",
(entry["session"], entry["timestamp"], sensorid, entry["src_ip"]),
)
elif entry["eventid"] == "cowrie.login.success":
self.simpleQuery(
"INSERT INTO `auth` (`session`, `success`, `username`, `password`, `timestamp`) "
"VALUES (?, ?, ?, ?, ?)",
(
entry["session"],
1,
entry["username"],
entry["password"],
entry["timestamp"],
),
)
elif entry["eventid"] == "cowrie.login.failed":
self.simpleQuery(
"INSERT INTO `auth` (`session`, `success`, `username`, `password`, `timestamp`) "
"VALUES (?, ?, ?, ?, ?)",
(
entry["session"],
0,
entry["username"],
entry["password"],
entry["timestamp"],
),
)
elif entry["eventid"] == "cowrie.command.input":
self.simpleQuery(
"INSERT INTO `input` (`session`, `timestamp`, `success`, `input`) "
"VALUES (?, ?, ?, ?)",
(entry["session"], entry["timestamp"], 1, entry["input"]),
)
elif entry["eventid"] == "cowrie.command.failed":
self.simpleQuery(
"INSERT INTO `input` (`session`, `timestamp`, `success`, `input`) "
"VALUES (?, ?, ?, ?)",
(entry["session"], entry["timestamp"], 0, entry["input"]),
)
elif entry["eventid"] == "cowrie.session.params":
self.simpleQuery(
"INSERT INTO `params` (`session`, `arch`) " "VALUES (?, ?)",
(entry["session"], entry["arch"]),
)
elif entry["eventid"] == "cowrie.session.file_download":
self.simpleQuery(
"INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) "
"VALUES (?, ?, ?, ?, ?)",
(
entry["session"],
entry["timestamp"],
entry["url"],
entry["outfile"],
entry["shasum"],
),
)
elif entry["eventid"] == "cowrie.session.file_download.failed":
self.simpleQuery(
"INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) "
"VALUES (?, ?, ?, ?, ?)",
(entry["session"], entry["timestamp"], entry["url"], "NULL", "NULL"),
)
elif entry["eventid"] == "cowrie.client.version":
r = yield self.db.runQuery(
"SELECT `id` FROM `clients` " "WHERE `version` = ?", (entry["version"],)
)
if r and r[0][0]:
id = int(r[0][0])
else:
yield self.db.runQuery(
"INSERT INTO `clients` (`version`) " "VALUES (?)",
(entry["version"],),
)
r = yield self.db.runQuery("SELECT LAST_INSERT_ROWID()")
id = int(r[0][0])
self.simpleQuery(
"UPDATE `sessions` " "SET `client` = ? " "WHERE `id` = ?",
(id, entry["session"]),
)
elif entry["eventid"] == "cowrie.client.size":
self.simpleQuery(
"UPDATE `sessions` " "SET `termsize` = ? " "WHERE `id` = ?",
("{}x{}".format(entry["width"], entry["height"]), entry["session"]),
)
elif entry["eventid"] == "cowrie.session.closed":
self.simpleQuery(
"UPDATE `sessions` " "SET `endtime` = ? " "WHERE `id` = ?",
(entry["timestamp"], entry["session"]),
)
elif entry["eventid"] == "cowrie.log.closed":
self.simpleQuery(
"INSERT INTO `ttylog` (`session`, `ttylog`, `size`) "
"VALUES (?, ?, ?)",
(entry["session"], entry["ttylog"], entry["size"]),
)
elif entry["eventid"] == "cowrie.client.fingerprint":
self.simpleQuery(
"INSERT INTO `keyfingerprints` (`session`, `username`, `fingerprint`) "
"VALUES (?, ?, ?)",
(entry["session"], entry["username"], entry["fingerprint"]),
)
elif entry["eventid"] == "cowrie.direct-tcpip.request":
self.simpleQuery(
"INSERT INTO `ipforwards` (`session`, `timestamp`, `dst_ip`, `dst_port`) "
"VALUES (?, ?, ?, ?)",
(
entry["session"],
entry["timestamp"],
entry["dst_ip"],
entry["dst_port"],
),
)
elif entry["eventid"] == "cowrie.direct-tcpip.data":
self.simpleQuery(
"INSERT INTO `ipforwardsdata` (`session`, `timestamp`, `dst_ip`, `dst_port`, `data`) "
"VALUES (?, ?, ?, ?, ?)",
(
entry["session"],
entry["timestamp"],
entry["dst_ip"],
entry["dst_port"],
entry["data"],
),
)
| 7,367 | 33.919431 | 102 | py |
cowrie | cowrie-master/src/cowrie/output/textlog.py | # All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# notice, this list of conditions and the following disclaimer.
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# products derived from this software without specific prior written
# permission.
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from __future__ import annotations
import cowrie.core.cef
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
textlog output
"""
def start(self):
self.format = CowrieConfig.get("output_textlog", "format")
self.outfile = open(
CowrieConfig.get("output_textlog", "logfile"), "a", encoding="utf-8"
)
def stop(self):
pass
def write(self, logentry):
if self.format == "cef":
self.outfile.write("{} ".format(logentry["timestamp"]))
self.outfile.write(f"{cowrie.core.cef.formatCef(logentry)}\n")
else:
self.outfile.write("{} ".format(logentry["timestamp"]))
self.outfile.write("{} ".format(logentry["session"]))
self.outfile.write("{}\n".format(logentry["message"]))
self.outfile.flush()
| 2,383 | 38.733333 | 80 | py |
cowrie | cowrie-master/src/cowrie/output/redis.py | from __future__ import annotations
import json
from configparser import NoOptionError
import redis
import cowrie.core.output
from cowrie.core.config import CowrieConfig
SEND_METHODS = {
"lpush": lambda redis_client, key, message: redis_client.lpush(key, message),
"rpush": lambda redis_client, key, message: redis_client.rpush(key, message),
"publish": lambda redis_client, key, message: redis_client.publish(key, message),
}
class Output(cowrie.core.output.Output):
"""
redis output
"""
def start(self):
"""
Initialize the Redis client connection
"""
host: str = CowrieConfig.get("output_redis", "host")
port: int = CowrieConfig.getint("output_redis", "port")
try:
db = CowrieConfig.getint("output_redis", "db")
except NoOptionError:
db = 0
try:
password = CowrieConfig.get("output_redis", "password")
except NoOptionError:
password = None
self.redis = redis.StrictRedis(host=host, port=port, db=db, password=password)
self.keyname = CowrieConfig.get("output_redis", "keyname")
try:
self.send_method = SEND_METHODS[
CowrieConfig.get("output_redis", "send_method")
]
except (NoOptionError, KeyError):
self.send_method = SEND_METHODS["lpush"]
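# Illustrative cowrie.cfg section; option names match the reads above, values
# are placeholders. db and password are optional, and send_method must be one
# of lpush, rpush or publish (defaults to lpush).
#
#   [output_redis]
#   host = localhost
#   port = 6379
#   db = 0
#   password = secret
#   keyname = cowrie
#   send_method = lpush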
def stop(self):
pass
def write(self, logentry):
"""
Push to redis
"""
# Add the entry to redis
for i in list(logentry.keys()):
# Remove twisted 15 legacy keys
if i.startswith("log_"):
del logentry[i]
self.send_method(self.redis, self.keyname, json.dumps(logentry))
| 1,797 | 27.539683 | 87 | py |
cowrie | cowrie-master/src/cowrie/output/misp.py | from __future__ import annotations
import warnings
from functools import wraps
from pathlib import Path
from pymisp import MISPAttribute, MISPEvent, MISPSighting
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
try:
from pymisp import ExpandedPyMISP as PyMISP
except ImportError:
from pymisp import PyMISP as PyMISP
# PyMISP is very verbose regarding Python 2 deprecation
def ignore_warnings(f):
@wraps(f)
def inner(*args, **kwargs):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore")
response = f(*args, **kwargs)
return response
return inner
class Output(cowrie.core.output.Output):
"""
MISP Upload Plugin for Cowrie.
This Plugin creates a new event for unseen file uploads
or adds sightings for previously seen files.
The decision is done by searching for the SHA 256 sum in all matching attributes.
"""
debug: bool
@ignore_warnings
def start(self):
"""
Start output plugin
"""
misp_url = CowrieConfig.get("output_misp", "base_url")
misp_key = CowrieConfig.get("output_misp", "api_key")
misp_verifycert = (
"true" == CowrieConfig.get("output_misp", "verify_cert").lower()
)
self.misp_api = PyMISP(
url=misp_url, key=misp_key, ssl=misp_verifycert, debug=False
)
self.debug = CowrieConfig.getboolean("output_misp", "debug", fallback=False)
self.publish = CowrieConfig.getboolean(
"output_misp", "publish_event", fallback=False
)
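# Illustrative cowrie.cfg section; option names are the ones read in start(),
# values are placeholders for your own MISP instance.
#
#   [output_misp]
#   base_url = https://misp.example.org
#   api_key = 0123456789abcdef
#   verify_cert = true
#   publish_event = false
#   debug = false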
def stop(self):
"""
Stop output plugin
"""
pass
def write(self, entry):
"""
Push file download to MISP
"""
if entry["eventid"] == "cowrie.session.file_download":
file_sha_attrib = self.find_attribute("sha256", entry["shasum"])
if file_sha_attrib:
# file is known, add sighting!
if self.debug:
log.msg("File known, add sighting")
self.add_sighting(entry, file_sha_attrib)
else:
# file is unknown, new event with upload
if self.debug:
log.msg("File unknwon, add new event")
self.create_new_event(entry)
@ignore_warnings
def find_attribute(self, attribute_type, searchterm):
"""
Returns a matching attribute or None if nothing was found.
"""
result = self.misp_api.search(
controller="attributes", type_attribute=attribute_type, value=searchterm
)
if result["Attribute"]:
return result["Attribute"][0]
else:
return None
@ignore_warnings
def create_new_event(self, entry):
attribute = MISPAttribute()
attribute.type = "malware-sample"
attribute.value = entry["shasum"]
attribute.data = Path(entry["outfile"])
attribute.comment = "File uploaded to Cowrie ({})".format(entry["sensor"])
attribute.expand = "binary"
if "url" in entry:
attributeURL = MISPAttribute()
attributeURL.type = "url"
attributeURL.value = entry["url"]
attributeURL.to_ids = True
else:
attributeURL = MISPAttribute()
attributeURL.type = "text"
attributeURL.value = "External upload"
attributeIP = MISPAttribute()
attributeIP.type = "ip-src"
attributeIP.value = entry["src_ip"]
attributeDT = MISPAttribute()
attributeDT.type = "datetime"
attributeDT.value = entry["timestamp"]
event = MISPEvent()
event.info = "File uploaded to Cowrie ({})".format(entry["sensor"])
event.add_tag("tlp:white")
event.attributes = [attribute, attributeURL, attributeIP, attributeDT]
event.run_expansions()
if self.publish:
event.publish()
result = self.misp_api.add_event(event)
if self.debug:
log.msg(f"Event creation result: \n{result}")
@ignore_warnings
def add_sighting(self, entry, attribute):
sighting = MISPSighting()
sighting.source = "{} (Cowrie)".format(entry["sensor"])
self.misp_api.add_sighting(sighting, attribute)
| 4,393 | 31.308824 | 85 | py |
cowrie | cowrie-master/src/cowrie/output/slack.py | # All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# notice, this list of conditions and the following disclaimer.
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# products derived from this software without specific prior written
# permission.
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from __future__ import annotations
import json
import time
from slack import WebClient
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
slack output
"""
def start(self):
self.slack_channel = CowrieConfig.get("output_slack", "channel")
self.slack_token = CowrieConfig.get("output_slack", "token")
def stop(self):
pass
def write(self, logentry):
for i in list(logentry.keys()):
# Remove twisted 15 legacy keys
if i.startswith("log_"):
del logentry[i]
self.sc = WebClient(self.slack_token)
self.sc.chat_postMessage(
channel=self.slack_channel,
text="{} {}".format(
time.strftime("%Y-%m-%d %H:%M:%S"),
json.dumps(logentry, indent=4, sort_keys=True),
),
)
| 2,394 | 35.287879 | 75 | py |
cowrie | cowrie-master/src/cowrie/output/cuckoo.py | # All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# notice, this list of conditions and the following disclaimer.
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# products derived from this software without specific prior written
# permission.
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""
Send downloaded/uploaded files to Cuckoo
"""
from __future__ import annotations
import os
from urllib.parse import urljoin, urlparse
import requests
from requests.auth import HTTPBasicAuth
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
cuckoo output
"""
api_user: str
api_passwd: str
url_base: bytes
cuckoo_force: int
def start(self):
"""
Start output plugin
"""
self.url_base = CowrieConfig.get("output_cuckoo", "url_base").encode("utf-8")
self.api_user = CowrieConfig.get("output_cuckoo", "user")
self.api_passwd = CowrieConfig.get("output_cuckoo", "passwd", raw=True)
self.cuckoo_force = int(CowrieConfig.getboolean("output_cuckoo", "force"))
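# Illustrative cowrie.cfg section; option names match the reads above, values
# are placeholders. force = true resubmits files even if Cuckoo has already
# analysed the same file.
#
#   [output_cuckoo]
#   url_base = https://cuckoo.example.org:8090
#   user = cuckoo
#   passwd = secret
#   force = false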
def stop(self):
"""
Stop output plugin
"""
pass
def write(self, entry):
if entry["eventid"] == "cowrie.session.file_download":
log.msg("Sending file to Cuckoo")
p = urlparse(entry["url"]).path
if p == "":
fileName = entry["shasum"]
else:
b = os.path.basename(p)
if b == "":
fileName = entry["shasum"]
else:
fileName = b
if (
self.cuckoo_force
or self.cuckoo_check_if_dup(os.path.basename(entry["outfile"])) is False
):
self.postfile(entry["outfile"], fileName)
elif entry["eventid"] == "cowrie.session.file_upload":
if (
self.cuckoo_force
or self.cuckoo_check_if_dup(os.path.basename(entry["outfile"])) is False
):
log.msg("Sending file to Cuckoo")
self.postfile(entry["outfile"], entry["filename"])
def cuckoo_check_if_dup(self, sha256: str) -> bool:
"""
Check if file already was analyzed by cuckoo
"""
try:
log.msg(f"Looking for tasks for: {sha256}")
res = requests.get(
urljoin(self.url_base, f"/files/view/sha256/{sha256}".encode()),
verify=False,
auth=HTTPBasicAuth(self.api_user, self.api_passwd),
timeout=60,
)
if res and res.ok:
log.msg(
"Sample found in Sandbox, with ID: {}".format(
res.json().get("sample", {}).get("id", 0)
)
)
return True
except Exception as e:
log.msg(e)
return False
def postfile(self, artifact, fileName):
"""
Send a file to Cuckoo
"""
with open(artifact, "rb") as art:
files = {"file": (fileName, art.read())}
try:
res = requests.post(
urljoin(self.url_base, b"tasks/create/file"),
files=files,
auth=HTTPBasicAuth(self.api_user, self.api_passwd),
verify=False,
)
if res and res.ok:
log.msg(
"Cuckoo Request: {}, Task created with ID: {}".format(
res.status_code, res.json()["task_id"]
)
)
else:
log.msg(f"Cuckoo Request failed: {res.status_code}")
except Exception as e:
log.msg(f"Cuckoo Request failed: {e}")
def posturl(self, scanUrl):
"""
Send a URL to Cuckoo
"""
data = {"url": scanUrl}
try:
res = requests.post(
urljoin(self.url_base, b"tasks/create/url"),
data=data,
auth=HTTPBasicAuth(self.api_user, self.api_passwd),
verify=False,
)
if res and res.ok:
log.msg(
"Cuckoo Request: {}, Task created with ID: {}".format(
res.status_code, res.json()["task_id"]
)
)
else:
log.msg(f"Cuckoo Request failed: {res.status_code}")
except Exception as e:
log.msg(f"Cuckoo Request failed: {e}")
| 5,799 | 33.52381 | 88 | py |