INSTRUCTION stringlengths 1 46.3k | RESPONSE stringlengths 75 80.2k |
|---|---|
Loads a task with the given ID from the given queue in the given
state. An integer may be passed in the load_executions parameter
to indicate how many executions should be loaded (starting from the
latest). If the task doesn't exist, None is returned. | def from_id(self, tiger, queue, state, task_id, load_executions=0):
"""
Loads a task with the given ID from the given queue in the given
state. An integer may be passed in the load_executions parameter
to indicate how many executions should be loaded (starting from the
latest). I... |
Returns a tuple with the following information:
* total items in the queue
* tasks from the given queue in the given state, latest first.
An integer may be passed in the load_executions parameter to indicate
how many executions should be loaded (starting from the latest). | def tasks_from_queue(self, tiger, queue, state, skip=0, limit=1000,
load_executions=0):
"""
Returns a tuple with the following information:
* total items in the queue
* tasks from the given queue in the given state, latest first.
An integer may be passed in th... |
Queries and returns the number of past task executions. | def n_executions(self):
"""
Queries and returns the number of past task executions.
"""
pipeline = self.tiger.connection.pipeline()
pipeline.exists(self.tiger._key('task', self.id))
pipeline.llen(self.tiger._key('task', self.id, 'executions'))
exists, n_executions... |
Set inputs after initialization
Parameters
-------
nr: integer
length of generated time-series
number must be power of two
qd: float
discrete variance
b: float
noise type:
0 : White Phase Modulation (WPM)
... | def set_input(self, nr=2, qd=1, b=0):
""" Set inputs after initialization
Parameters
-------
nr: integer
length of generated time-series
number must be power of two
qd: float
discrete variance
b: float
noise type:
... |
Generate noise time series based on input parameters
Returns
-------
time_series: np.array
Time series with colored noise.
len(time_series) == nr | def generateNoise(self):
""" Generate noise time series based on input parameters
Returns
-------
time_series: np.array
Time series with colored noise.
len(time_series) == nr
"""
# Fill wfb array with white noise based on given discrete variance
... |
return phase power spectral density coefficient g_b
for noise-type defined by (qd, b, tau0)
where tau0 is the interval between data points
Colored noise generated with (qd, b, tau0) parameters will
show a phase power spectral density of
S_x(f) = Phase_PSD(f) ... | def phase_psd_from_qd(self, tau0=1.0):
""" return phase power spectral density coefficient g_b
for noise-type defined by (qd, b, tau0)
where tau0 is the interval between data points
Colored noise generated with (qd, b, tau0) parameters will
show a phase power spe... |
return frequency power spectral density coefficient h_a
for the noise type defined by (qd, b, tau0)
Colored noise generated with (qd, b, tau0) parameters will
show a frequency power spectral density of
S_y(f) = Frequency_PSD(f) = h_a * f^a
where the slope a ... | def frequency_psd_from_qd(self, tau0=1.0):
""" return frequency power spectral density coefficient h_a
for the noise type defined by (qd, b, tau0)
Colored noise generated with (qd, b, tau0) parameters will
show a frequency power spectral density of
S_y(f) = Freq... |
return predicted ADEV of noise-type at given tau | def adev(self, tau0, tau):
""" return predicted ADEV of noise-type at given tau
"""
prefactor = self.adev_from_qd(tau0=tau0, tau=tau)
c = self.c_avar()
avar = pow(prefactor, 2)*pow(tau, c)
return np.sqrt(avar) |
return predicted MDEV of noise-type at given tau | def mdev(self, tau0, tau):
""" return predicted MDEV of noise-type at given tau
"""
prefactor = self.mdev_from_qd(tau0=tau0, tau=tau)
c = self.c_mvar()
mvar = pow(prefactor, 2)*pow(tau, c)
return np.sqrt(mvar) |
return tau exponent "c" for noise type.
AVAR = prefactor * h_a * tau^c | def c_avar(self):
""" return tau exponent "c" for noise type.
AVAR = prefactor * h_a * tau^c
"""
if self.b == -4:
return 1.0
elif self.b == -3:
return 0.0
elif self.b == -2:
return -1.0
elif self.b == -1:
return ... |
return tau exponent "c" for noise type.
MVAR = prefactor * h_a * tau^c | def c_mvar(self):
""" return tau exponent "c" for noise type.
MVAR = prefactor * h_a * tau^c
"""
if self.b == -4:
return 1.0
elif self.b == -3:
return 0.0
elif self.b == -2:
return -1.0
elif self.b == -1:
return ... |
prefactor for Allan deviation for noise
type defined by (qd, b, tau0)
Colored noise generated with (qd, b, tau0) parameters will
show an Allan variance of:
AVAR = prefactor * h_a * tau^c
where a = b + 2 is the slope of the frequency PSD.
and h_a... | def adev_from_qd(self, tau0=1.0, tau=1.0):
""" prefactor for Allan deviation for noise
type defined by (qd, b, tau0)
Colored noise generated with (qd, b, tau0) parameters will
show an Allan variance of:
AVAR = prefactor * h_a * tau^c
where a = b + 2... |
calculate power spectral density of input signal x
x = signal
f_sample = sampling frequency in Hz. i.e. 1/fs is the time-interval
in seconds between datapoints
scale fft so that output corresponds to 1-sided PSD
output has units of [X^2/Hz] where X is the unit of x | def numpy_psd(x, f_sample=1.0):
""" calculate power spectral density of input signal x
x = signal
f_sample = sampling frequency in Hz. i.e. 1/fs is the time-interval
in seconds between datapoints
scale fft so that output corresponds to 1-sided PSD
output has units of [X^... |
PSD routine from scipy
we can compare our own numpy result against this one | def scipy_psd(x, f_sample=1.0, nr_segments=4):
""" PSD routine from scipy
we can compare our own numpy result against this one
"""
f_axis, psd_of_x = scipy.signal.welch(x, f_sample, nperseg=len(x)/nr_segments)
return f_axis, psd_of_x |
generate time series with white noise that has constant PSD = b0,
up to the nyquist frequency fs/2
N = number of samples
b0 = desired power-spectral density in [X^2/Hz] where X is the unit of x
fs = sampling frequency, i.e. 1/fs is the time-interval between datapoints
the pre-fa... | def white(num_points=1024, b0=1.0, fs=1.0):
""" generate time series with white noise that has constant PSD = b0,
up to the nyquist frequency fs/2
N = number of samples
b0 = desired power-spectral density in [X^2/Hz] where X is the unit of x
fs = sampling frequency, i.e. 1/fs is the ... |
Brownian or random walk (diffusion) noise with 1/f^2 PSD
(not really a color... rather Brownian or random-walk)
N = number of samples
b2 = desired PSD is b2*f^-2
fs = sampling frequency
we integrate white-noise to get Brownian noise. | def brown(num_points=1024, b2=1.0, fs=1.0):
""" Brownian or random walk (diffusion) noise with 1/f^2 PSD
(not really a color... rather Brownian or random-walk)
N = number of samples
b2 = desired PSD is b2*f^-2
fs = sampling frequency
we integrate white-noise to get Brownian... |
N-length vector with (approximate) pink noise
pink noise has 1/f PSD | def pink(N, depth=80):
"""
N-length vector with (approximate) pink noise
pink noise has 1/f PSD
"""
a = []
s = iterpink(depth)
for n in range(N):
a.append(next(s))
return a |
Generate a sequence of samples of pink noise.
pink noise generator
from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/
Based on the Voss-McCartney algorithm, discussion and code examples at
http://www.firstpr.com.au/dsp/pink-noise/
depth: Use this many samples of white noise to calculat... | def iterpink(depth=20):
"""Generate a sequence of samples of pink noise.
pink noise generator
from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/
Based on the Voss-McCartney algorithm, discussion and code examples at
http://www.firstpr.com.au/dsp/pink-noise/
depth: Use this many sam... |
plot a line with the slope alpha | def plotline(plt, alpha, taus, style,label=""):
""" plot a line with the slope alpha """
y = [pow(tt, alpha) for tt in taus]
plt.loglog(taus, y, style,label=label) |
B1 ratio for noise identification
ratio of Standard Variace to AVAR | def b1_noise_id(x, af, rate):
""" B1 ratio for noise identification
ratio of Standard Variace to AVAR
"""
(taus,devs,errs,ns) = at.adev(x,taus=[af*rate],data_type="phase", rate=rate)
oadev_x = devs[0]
y = np.diff(x)
y_cut = np.array( y[:len(y)-(len(y)%af)] ) # cut to length
ass... |
use matplotlib methods for plotting
Parameters
----------
atDataset : allantools.Dataset()
a dataset with computed data
errorbars : boolean
Plot errorbars. Defaults to False
grid : boolean
Plot grid. Defaults to False | def plot(self, atDataset,
errorbars=False,
grid=False):
""" use matplotlib methods for plotting
Parameters
----------
atDataset : allantools.Dataset()
a dataset with computed data
errorbars : boolean
Plot errorbars. Defaults to F... |
returns confidence interval (dev_min, dev_max)
for a given deviation dev, equivalent degrees of freedom edf,
and degree of confidence ci.
Parameters
----------
dev: float
Mean value (e.g. adev) around which we produce the confidence interval
edf: float
Equivalen... | def confidence_interval(dev, edf, ci=ONE_SIGMA_CI):
""" returns confidence interval (dev_min, dev_max)
for a given deviation dev, equivalent degrees of freedom edf,
and degree of confidence ci.
Parameters
----------
dev: float
Mean value (e.g. adev) around which we prod... |
returns confidence interval (dev_min, dev_max)
for a given deviation dev = Xdev( x, tau = af*(1/rate) )
steps:
1) identify noise type
2) compute EDF
3) compute confidence interval
Parameters
----------
x: numpy.array
time-series
dev: flo... | def confidence_interval_noiseID(x, dev, af, dev_type="adev", data_type="phase", ci=ONE_SIGMA_CI):
""" returns confidence interval (dev_min, dev_max)
for a given deviation dev = Xdev( x, tau = af*(1/rate) )
steps:
1) identify noise type
2) compute EDF
3) compute conf... |
R(n) ratio for noise identification
ration of MVAR to AVAR | def rn(x, af, rate):
""" R(n) ratio for noise identification
ration of MVAR to AVAR
"""
(taus,devs,errs,ns) = at.adev(x,taus=[af*rate], data_type='phase', rate=rate)
oadev_x = devs[0]
(mtaus,mdevs,errs,ns) = at.mdev(x,taus=[af*rate], data_type='phase', rate=rate)
mdev_x = mdevs[0]
... |
R(n) ratio expected from theory for given noise type
alpha = b + 2 | def rn_theory(af, b):
""" R(n) ratio expected from theory for given noise type
alpha = b + 2
"""
# From IEEE1139-2008
# alpha beta ADEV_mu MDEV_mu Rn_mu
# -2 -4 1 1 0 Random Walk FM
# -1 -3 0 0 0 Flicker FM
# ... |
R(n) ratio boundary for selecting between [b_hi-1, b_hi]
alpha = b + 2 | def rn_boundary(af, b_hi):
"""
R(n) ratio boundary for selecting between [b_hi-1, b_hi]
alpha = b + 2
"""
return np.sqrt( rn_theory(af, b)*rn_theory(af, b-1) ) |
Expected B1 ratio for given time-series length N and exponent mu
FIXME: add reference (paper & link)
The exponents are defined as
S_y(f) = h_a f^alpha (power spectrum of y)
S_x(f) = g_b f^b (power spectrum of x)
bias = const * tau^mu
and (... | def b1_theory(N, mu):
""" Expected B1 ratio for given time-series length N and exponent mu
FIXME: add reference (paper & link)
The exponents are defined as
S_y(f) = h_a f^alpha (power spectrum of y)
S_x(f) = g_b f^b (power spectrum of x)
bias = const *... |
B1 ratio boundary for selecting between [b_hi-1, b_hi]
alpha = b + 2 | def b1_boundary(b_hi, N):
"""
B1 ratio boundary for selecting between [b_hi-1, b_hi]
alpha = b + 2
"""
b_lo = b_hi-1
b1_lo = b1_theory(N, b_to_mu(b_lo))
b1_hi = b1_theory(N, b_to_mu(b_hi))
if b1_lo >= -4:
return np.sqrt(b1_lo*b1_hi) # geometric mean
else:
return ... |
Lag-1 autocorrelation function
as defined in Riley 2004, Eqn (2)
used by autocorr_noise_id()
Parameters
----------
x: numpy.array
time-series
Returns
-------
ACF: float
Lag-1 autocorrelation for input time-series x
... | def lag1_acf(x, detrend_deg=1):
""" Lag-1 autocorrelation function
as defined in Riley 2004, Eqn (2)
used by autocorr_noise_id()
Parameters
----------
x: numpy.array
time-series
Returns
-------
ACF: float
Lag-1 autocorr... |
Lag-1 autocorrelation based noise identification
Parameters
----------
x: numpy.array
phase or fractional frequency time-series data
minimum recommended length is len(x)>30 roughly.
af: int
averaging factor
data_type: string {'phase', 'freq'}
"phase" for phas... | def autocorr_noise_id(x, af, data_type="phase", dmin=0, dmax=2):
""" Lag-1 autocorrelation based noise identification
Parameters
----------
x: numpy.array
phase or fractional frequency time-series data
minimum recommended length is len(x)>30 roughly.
af: int
averagin... |
remove polynomial from data.
used by autocorr_noise_id()
Parameters
----------
x: numpy.array
time-series
deg: int
degree of polynomial to remove from x
Returns
-------
x_detrended: numpy.array
detrended time-series | def detrend(x, deg=1):
"""
remove polynomial from data.
used by autocorr_noise_id()
Parameters
----------
x: numpy.array
time-series
deg: int
degree of polynomial to remove from x
Returns
-------
x_detrended: numpy.array
detrended time-series... |
Eqn (13) from Greenhall2004 | def edf_greenhall_simple(alpha, d, m, S, F, N):
""" Eqn (13) from Greenhall2004 """
L = m/F+m*d # length of filter applied to phase samples
M = 1 + np.floor(S*(N-L) / m)
J = min(M, (d+1)*S)
inv_edf = (1.0/(pow(greenhall_sz(0, F, alpha, d), 2)*M))* \
greenhall_BasicSum(J, M, S, F, alph... |
returns Equivalent degrees of freedom
Parameters
----------
alpha: int
noise type, +2...-4
d: int
1 first-difference variance
2 Allan variance
3 Hadamard variance
require alpha+2*d>1
m: int
averaging... | def edf_greenhall(alpha, d, m, N, overlapping=False, modified=False, verbose=False):
""" returns Equivalent degrees of freedom
Parameters
----------
alpha: int
noise type, +2...-4
d: int
1 first-difference variance
2 Allan variance
... |
Eqn (10) from Greenhall2004 | def greenhall_BasicSum(J, M, S, F, alpha, d):
""" Eqn (10) from Greenhall2004 """
first = pow(greenhall_sz(0, F, alpha, d), 2)
second = (1-float(J)/float(M))*pow(greenhall_sz(float(J)/float(S), F, alpha, d), 2)
third = 0
for j in range(1, int(J)):
third += 2*(1.0-float(j)/float(M))*pow(green... |
Eqn (9) from Greenhall2004 | def greenhall_sz(t, F, alpha, d):
""" Eqn (9) from Greenhall2004 """
if d == 1:
a = 2*greenhall_sx(t, F, alpha)
b = greenhall_sx(t-1.0, F, alpha)
c = greenhall_sx(t+1.0, F, alpha)
return a-b-c
elif d == 2:
a = 6*greenhall_sx(t, F, alpha)
b = 4*greenhall_sx(t-1... |
Eqn (8) from Greenhall2004 | def greenhall_sx(t, F, alpha):
""" Eqn (8) from Greenhall2004
"""
if F == float('inf'):
return greenhall_sw(t, alpha+2)
a = 2*greenhall_sw(t, alpha)
b = greenhall_sw(t-1.0/float(F), alpha)
c = greenhall_sw(t+1.0/float(F), alpha)
return pow(F, 2)*(a-b-c) |
Eqn (7) from Greenhall2004 | def greenhall_sw(t, alpha):
""" Eqn (7) from Greenhall2004
"""
alpha = int(alpha)
if alpha == 2:
return -np.abs(t)
elif alpha == 1:
if t == 0:
return 0
else:
return pow(t, 2)*np.log(np.abs(t))
elif alpha == 0:
return np.abs(pow(t, 3))
... |
Table 2 from Greenhall 2004 | def greenhall_table2(alpha, d):
""" Table 2 from Greenhall 2004 """
row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6
assert(row_idx in [0, 1, 2, 3, 4, 5])
col_idx = int(d-1)
table2 = [[(3.0/2.0, 1.0/2.0), (35.0/18.0, 1.0), (231.0/100.0, 3.0/2.0)], # alpha=+2
[(78.6, 25.2), (790.0, ... |
Table 1 from Greenhall 2004 | def greenhall_table1(alpha, d):
""" Table 1 from Greenhall 2004 """
row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6
col_idx = int(d-1)
table1 = [[(2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0), (22.0/25.0, 2.0/3.0)], # alpha=+2
[(0.840, 0.345), (0.997, 0.616), (1.141, 0.843)],
[... |
Equivalent degrees of freedom for Total Deviation
FIXME: what is the right behavior for alpha outside 0,-1,-2?
NIST SP1065 page 41, Table 7 | def edf_totdev(N, m, alpha):
""" Equivalent degrees of freedom for Total Deviation
FIXME: what is the right behavior for alpha outside 0,-1,-2?
NIST SP1065 page 41, Table 7
"""
alpha = int(alpha)
if alpha in [0, -1, -2]:
# alpha 0 WFM
# alpha -1 FFM
# al... |
Equivalent degrees of freedom for Modified Total Deviation
NIST SP1065 page 41, Table 8 | def edf_mtotdev(N, m, alpha):
""" Equivalent degrees of freedom for Modified Total Deviation
NIST SP1065 page 41, Table 8
"""
assert(alpha in [2, 1, 0, -1, -2])
NIST_SP1065_table8 = [(1.90, 2.1), (1.20, 1.40), (1.10, 1.2), (0.85, 0.50), (0.75, 0.31)]
#(b, c) = NIST_SP1065_table8[ abs(al... |
Equivalent degrees of freedom.
Simple approximate formulae.
Parameters
----------
N : int
the number of phase samples
m : int
averaging factor, tau = m * tau0
alpha: int
exponent of f for the frequency PSD:
'wp' returns white phase noise. alpha=+2
... | def edf_simple(N, m, alpha):
"""Equivalent degrees of freedom.
Simple approximate formulae.
Parameters
----------
N : int
the number of phase samples
m : int
averaging factor, tau = m * tau0
alpha: int
exponent of f for the frequency PSD:
'wp' returns white p... |
Compute the GRADEV of a white phase noise. Compares two different
scenarios. 1) The original data and 2) ADEV estimate with gap robust ADEV. | def example1():
"""
Compute the GRADEV of a white phase noise. Compares two different
scenarios. 1) The original data and 2) ADEV estimate with gap robust ADEV.
"""
N = 1000
f = 1
y = np.random.randn(1,N)[0,:]
x = [xx for xx in np.linspace(1,len(y),len(y))]
x_ax, y_ax, (err_l, err_h... |
Compute the GRADEV of a nonstationary white phase noise. | def example2():
"""
Compute the GRADEV of a nonstationary white phase noise.
"""
N=1000 # number of samples
f = 1 # data samples per second
s=1+5/N*np.arange(0,N)
y=s*np.random.randn(1,N)[0,:]
x = [xx for xx in np.linspace(1,len(y),len(y))]
x_ax, y_ax, (err_l, err_h) , ns = allan.gra... |
Time deviation.
Based on modified Allan variance.
.. math::
\\sigma^2_{TDEV}( \\tau ) = { \\tau^2 \\over 3 }
\\sigma^2_{MDEV}( \\tau )
Note that TDEV has a unit of seconds.
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractio... | def tdev(data, rate=1.0, data_type="phase", taus=None):
""" Time deviation.
Based on modified Allan variance.
.. math::
\\sigma^2_{TDEV}( \\tau ) = { \\tau^2 \\over 3 }
\\sigma^2_{MDEV}( \\tau )
Note that TDEV has a unit of seconds.
Parameters
----------
data: np.arra... |
Modified Allan deviation.
Used to distinguish between White and Flicker Phase Modulation.
.. math::
\\sigma^2_{MDEV}(m\\tau_0) = { 1 \\over 2 (m \\tau_0 )^2 (N-3m+1) }
\\sum_{j=1}^{N-3m+1} \\lbrace
\\sum_{i=j}^{j+m-1} {x}_{i+2m} - 2x_{i+m} + x_{i} \\rbrace^2
Parameters
--... | def mdev(data, rate=1.0, data_type="phase", taus=None):
""" Modified Allan deviation.
Used to distinguish between White and Flicker Phase Modulation.
.. math::
\\sigma^2_{MDEV}(m\\tau_0) = { 1 \\over 2 (m \\tau_0 )^2 (N-3m+1) }
\\sum_{j=1}^{N-3m+1} \\lbrace
\\sum_{i=j}^{j+m-1... |
Allan deviation.
Classic - use only if required - relatively poor confidence.
.. math::
\\sigma^2_{ADEV}(\\tau) = { 1 \\over 2 \\tau^2 }
\\langle ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 \\rangle
= { 1 \\over 2 (N-2) \\tau^2 }
\\sum_{n=1}^{N-2} ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2
... | def adev(data, rate=1.0, data_type="phase", taus=None):
""" Allan deviation.
Classic - use only if required - relatively poor confidence.
.. math::
\\sigma^2_{ADEV}(\\tau) = { 1 \\over 2 \\tau^2 }
\\langle ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 \\rangle
= { 1 \\over 2 (N-2) \\tau^2... |
Main algorithm for adev() (stride=mj) and oadev() (stride=1)
see http://www.leapsecond.com/tools/adev_lib.c
stride = mj for nonoverlapping allan deviation
Parameters
----------
phase: np.array
Phase data in seconds.
rate: float
The sampling rate for phase or frequency, ... | def calc_adev_phase(phase, rate, mj, stride):
""" Main algorithm for adev() (stride=mj) and oadev() (stride=1)
see http://www.leapsecond.com/tools/adev_lib.c
stride = mj for nonoverlapping allan deviation
Parameters
----------
phase: np.array
Phase data in seconds.
rate: f... |
Overlapping Hadamard deviation.
Better confidence than normal Hadamard.
.. math::
\\sigma^2_{OHDEV}(m\\tau_0) = { 1 \\over 6 (m \\tau_0 )^2 (N-3m) }
\\sum_{i=1}^{N-3m} ( {x}_{i+3m} - 3x_{i+2m} + 3x_{i+m} - x_{i} )^2
where :math:`x_i` is the time-series of phase observations, spaced
... | def ohdev(data, rate=1.0, data_type="phase", taus=None):
""" Overlapping Hadamard deviation.
Better confidence than normal Hadamard.
.. math::
\\sigma^2_{OHDEV}(m\\tau_0) = { 1 \\over 6 (m \\tau_0 )^2 (N-3m) }
\\sum_{i=1}^{N-3m} ( {x}_{i+3m} - 3x_{i+2m} + 3x_{i+m} - x_{i} )^2
wher... |
main calculation fungtion for HDEV and OHDEV
Parameters
----------
phase: np.array
Phase data in seconds.
rate: float
The sampling rate for phase or frequency, in Hz
mj: int
M index value for stride
stride: int
Size of stride
Returns
-------
(dev, de... | def calc_hdev_phase(phase, rate, mj, stride):
""" main calculation fungtion for HDEV and OHDEV
Parameters
----------
phase: np.array
Phase data in seconds.
rate: float
The sampling rate for phase or frequency, in Hz
mj: int
M index value for stride
stride: int
... |
Total deviation.
Better confidence at long averages for Allan.
.. math::
\\sigma^2_{TOTDEV}( m\\tau_0 ) = { 1 \\over 2 (m\\tau_0)^2 (N-2) }
\\sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2
Where :math:`x^*_i` is a new time-series of length :math:`3N-4`
derived from ... | def totdev(data, rate=1.0, data_type="phase", taus=None):
""" Total deviation.
Better confidence at long averages for Allan.
.. math::
\\sigma^2_{TOTDEV}( m\\tau_0 ) = { 1 \\over 2 (m\\tau_0)^2 (N-2) }
\\sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2
Where :math:`x^... |
Time Total Deviation
modified total variance scaled by tau^2 / 3
NIST SP 1065 eqn (28) page 26 <--- formula should have tau squared !?! | def ttotdev(data, rate=1.0, data_type="phase", taus=None):
""" Time Total Deviation
modified total variance scaled by tau^2 / 3
NIST SP 1065 eqn (28) page 26 <--- formula should have tau squared !?!
"""
(taus, mtotdevs, mde, ns) = mtotdev(data, data_type=data_type,
... |
PRELIMINARY - REQUIRES FURTHER TESTING.
Modified Total deviation.
Better confidence at long averages for modified Allan
FIXME: bias-correction http://www.wriley.com/CI2.pdf page 6
The variance is scaled up (divided by this number) based on the
noise-type identified.
WPM... | def mtotdev(data, rate=1.0, data_type="phase", taus=None):
""" PRELIMINARY - REQUIRES FURTHER TESTING.
Modified Total deviation.
Better confidence at long averages for modified Allan
FIXME: bias-correction http://www.wriley.com/CI2.pdf page 6
The variance is scaled up (divided by t... |
PRELIMINARY - REQUIRES FURTHER TESTING.
Hadamard Total deviation.
Better confidence at long averages for Hadamard deviation
FIXME: bias corrections from http://www.wriley.com/CI2.pdf
W FM 0.995 alpha= 0
F FM 0.851 alpha=-1
RW FM 0.771 alpha=-2
... | def htotdev(data, rate=1.0, data_type="phase", taus=None):
""" PRELIMINARY - REQUIRES FURTHER TESTING.
Hadamard Total deviation.
Better confidence at long averages for Hadamard deviation
FIXME: bias corrections from http://www.wriley.com/CI2.pdf
W FM 0.995 alpha= 0
F... |
PRELIMINARY - REQUIRES FURTHER TESTING.
calculation of htotdev for one averaging factor m
tau = m*tau0
Parameters
----------
frequency: np.array
Fractional frequency data (nondimensional).
m: int
Averaging factor. tau = m*tau0, where tau0=1/rate. | def calc_htotdev_freq(freq, m):
""" PRELIMINARY - REQUIRES FURTHER TESTING.
calculation of htotdev for one averaging factor m
tau = m*tau0
Parameters
----------
frequency: np.array
Fractional frequency data (nondimensional).
m: int
Averaging f... |
PRELIMINARY - REQUIRES FURTHER TESTING.
Theo1 is a two-sample variance with improved confidence and
extended averaging factor range.
.. math::
\\sigma^2_{THEO1}(m\\tau_0) = { 1 \\over (m \\tau_0 )^2 (N-m) }
\\sum_{i=1}^{N-m} \\sum_{\\delta=0}^{m/2-1}
... | def theo1(data, rate=1.0, data_type="phase", taus=None):
""" PRELIMINARY - REQUIRES FURTHER TESTING.
Theo1 is a two-sample variance with improved confidence and
extended averaging factor range.
.. math::
\\sigma^2_{THEO1}(m\\tau_0) = { 1 \\over (m \\tau_0 )^2 (N-m) }
... |
Time Interval Error RMS.
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Def... | def tierms(data, rate=1.0, data_type="phase", taus=None):
""" Time Interval Error RMS.
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type:... |
Make an ndarray with a rolling window of the last dimension, from
http://mail.scipy.org/pipermail/numpy-discussion/2011-January/054401.html
Parameters
----------
a : array_like
Array to add rolling window to
window : int
Size of rolling window
Returns
-------
Array that... | def mtie_rolling_window(a, window):
"""
Make an ndarray with a rolling window of the last dimension, from
http://mail.scipy.org/pipermail/numpy-discussion/2011-January/054401.html
Parameters
----------
a : array_like
Array to add rolling window to
window : int
Size of rollin... |
Maximum Time Interval Error.
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency.... | def mtie(data, rate=1.0, data_type="phase", taus=None):
""" Maximum Time Interval Error.
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_typ... |
fast binary decomposition algorithm for MTIE
See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in
Characterization of Network Synchronization Performance" | def mtie_phase_fast(phase, rate=1.0, data_type="phase", taus=None):
""" fast binary decomposition algorithm for MTIE
See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in
Characterization of Network Synchronization Performance"
"""
rate = float(rate)
phase = np.asarray(p... |
gap resistant overlapping Allan deviation
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional). Warning : phase data works better (frequency data is
first trantformed into phase using numpy.cumsum() function, which can
... | def gradev(data, rate=1.0, data_type="phase", taus=None,
ci=0.9, noisetype='wp'):
""" gap resistant overlapping Allan deviation
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional). Warning : phase data works better (fr... |
see http://www.leapsecond.com/tools/adev_lib.c
stride = mj for nonoverlapping allan deviation
stride = 1 for overlapping allan deviation
see http://en.wikipedia.org/wiki/Allan_variance
1 1
s2y(t) = --------- sum [x(i+2) - 2x(i+1) + x(i) ]^2
2*tau^2 | def calc_gradev_phase(data, rate, mj, stride, confidence, noisetype):
""" see http://www.leapsecond.com/tools/adev_lib.c
stride = mj for nonoverlapping allan deviation
stride = 1 for overlapping allan deviation
see http://en.wikipedia.org/wiki/Allan_variance
1 1
... |
Take either phase or frequency as input and return phase | def input_to_phase(data, rate, data_type):
""" Take either phase or frequency as input and return phase
"""
if data_type == "phase":
return data
elif data_type == "freq":
return frequency2phase(data, rate)
else:
raise Exception("unknown data_type: " + data_type) |
pre-processing of the tau-list given by the user (Helper function)
Does sanity checks, sorts data, removes duplicates and invalid values.
Generates a tau-list based on keywords 'all', 'decade', 'octave'.
Uses 'octave' by default if no taus= argument is given.
Parameters
----------
data: np.arr... | def tau_generator(data, rate, taus=None, v=False, even=False, maximum_m=-1):
""" pre-processing of the tau-list given by the user (Helper function)
Does sanity checks, sorts data, removes duplicates and invalid values.
Generates a tau-list based on keywords 'all', 'decade', 'octave'.
Uses 'octave' by d... |
Reduce the number of taus to maximum of n per decade (Helper function)
takes in a tau list and reduces the number of taus to a maximum amount per
decade. This is only useful if more than the "decade" and "octave" but
less than the "all" taus are wanted. E.g. to show certain features of
the data one mig... | def tau_reduction(ms, rate, n_per_decade):
"""Reduce the number of taus to maximum of n per decade (Helper function)
takes in a tau list and reduces the number of taus to a maximum amount per
decade. This is only useful if more than the "decade" and "octave" but
less than the "all" taus are wanted. E.g... |
Remove results with small number of samples.
If n is small (==1), reject the result
Parameters
----------
taus: array
List of tau values for which deviation were computed
devs: array
List of deviations
deverrs: array or list of arrays
List of estimated errors (possib... | def remove_small_ns(taus, devs, deverrs, ns):
""" Remove results with small number of samples.
If n is small (==1), reject the result
Parameters
----------
taus: array
List of tau values for which deviation were computed
devs: array
List of deviations
deverrs: array or l... |
Trim leading and trailing NaNs from dataset
This is done by browsing the array from each end and store the index of the
first non-NaN in each case, the return the appropriate slice of the array | def trim_data(x):
"""
Trim leading and trailing NaNs from dataset
This is done by browsing the array from each end and store the index of the
first non-NaN in each case, the return the appropriate slice of the array
"""
# Find indices for first and last valid data
first = 0
while np.isna... |
Three Cornered Hat Method
Given three clocks A, B, C, we seek to find their variances
:math:`\\sigma^2_A`, :math:`\\sigma^2_B`, :math:`\\sigma^2_C`.
We measure three phase differences, assuming no correlation between
the clocks, the measurements have variances:
.. math::
\\sigma^2_{AB} = ... | def three_cornered_hat_phase(phasedata_ab, phasedata_bc,
phasedata_ca, rate, taus, function):
"""
Three Cornered Hat Method
Given three clocks A, B, C, we seek to find their variances
:math:`\\sigma^2_A`, :math:`\\sigma^2_B`, :math:`\\sigma^2_C`.
We measure three phase ... |
integrate fractional frequency data and output phase data
Parameters
----------
freqdata: np.array
Data array of fractional frequency measurements (nondimensional)
rate: float
The sampling rate for phase or frequency, in Hz
Returns
-------
phasedata: np.array
Time i... | def frequency2phase(freqdata, rate):
""" integrate fractional frequency data and output phase data
Parameters
----------
freqdata: np.array
Data array of fractional frequency measurements (nondimensional)
rate: float
The sampling rate for phase or frequency, in Hz
Returns
-... |
Convert phase in seconds to phase in radians
Parameters
----------
phasedata: np.array
Data array of phase in seconds
v0: float
Nominal oscillator frequency in Hz
Returns
-------
fi:
phase data in radians | def phase2radians(phasedata, v0):
""" Convert phase in seconds to phase in radians
Parameters
----------
phasedata: np.array
Data array of phase in seconds
v0: float
Nominal oscillator frequency in Hz
Returns
-------
fi:
phase data in radians
"""
fi = [2... |
Convert frequency in Hz to fractional frequency
Parameters
----------
frequency: np.array
Data array of frequency in Hz
mean_frequency: float
(optional) The nominal mean frequency, in Hz
if omitted, defaults to mean frequency=np.mean(frequency)
Returns
-------
y:
... | def frequency2fractional(frequency, mean_frequency=-1):
""" Convert frequency in Hz to fractional frequency
Parameters
----------
frequency: np.array
Data array of frequency in Hz
mean_frequency: float
(optional) The nominal mean frequency, in Hz
if omitted, defaults to mean... |
Optionnal method if you chose not to set inputs on init
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional)
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'p... | def set_input(self, data,
rate=1.0, data_type="phase", taus=None):
""" Optionnal method if you chose not to set inputs on init
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional)
... |
Evaluate the passed function with the supplied data.
Stores result in self.out.
Parameters
----------
function: str
Name of the :mod:`allantools` function to evaluate
Returns
-------
result: dict
The results of the calculation. | def compute(self, function):
"""Evaluate the passed function with the supplied data.
Stores result in self.out.
Parameters
----------
function: str
Name of the :mod:`allantools` function to evaluate
Returns
-------
result: dict
T... |
compute average of many PSDs | def many_psds(k=2,fs=1.0, b0=1.0, N=1024):
""" compute average of many PSDs """
psd=[]
for j in range(k):
print j
x = noise.white(N=2*4096,b0=b0,fs=fs)
f, tmp = noise.numpy_psd(x,fs)
if j==0:
psd = tmp
else:
psd = psd + tmp
return f, psd/k |
Find organization that has the current identity as the owner or as the member | def list_my(self):
""" Find organization that has the current identity as the owner or as the member """
org_list = self.call_contract_command("Registry", "listOrganizations", [])
rez_owner = []
rez_member = []
for idx, org_id in enumerate(org_list):
(found, org_id,... |
Return new group_id in base64 | def add_group(self, group_name, payment_address):
""" Return new group_id in base64 """
if (self.is_group_name_exists(group_name)):
raise Exception("the group \"%s\" is already present"%str(group_name))
group_id_base64 = base64.b64encode(secrets.token_bytes(32))
self.m["group... |
check if group with given name is already exists | def is_group_name_exists(self, group_name):
""" check if group with given name is already exists """
groups = self.m["groups"]
for g in groups:
if (g["group_name"] == group_name):
return True
return False |
return group with given group_id (return None if doesn't exists) | def get_group_by_group_id(self, group_id):
""" return group with given group_id (return None if doesn't exists) """
group_id_base64 = base64.b64encode(group_id).decode('ascii')
groups = self.m["groups"]
for g in groups:
if (g["group_id"] == group_id_base64):
r... |
In all getter function in case of single payment group, group_name can be None | def get_group_name_nonetrick(self, group_name = None):
""" In all getter function in case of single payment group, group_name can be None """
groups = self.m["groups"]
if (len(groups) == 0):
raise Exception("Cannot find any groups in metadata")
if (not group_name):
... |
make tar from protodir/*proto, and publish this tar in ipfs
return base58 encoded ipfs hash | def publish_proto_in_ipfs(ipfs_client, protodir):
"""
make tar from protodir/*proto, and publish this tar in ipfs
return base58 encoded ipfs hash
"""
if (not os.path.isdir(protodir)):
raise Exception("Directory %s doesn't exists"%protodir)
files = glob.glob(os.path.join(protodir, "... |
Get file from ipfs
We must check the hash becasue we cannot believe that ipfs_client wasn't been compromise | def get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash_base58, validate=True):
"""
Get file from ipfs
We must check the hash becasue we cannot believe that ipfs_client wasn't been compromise
"""
if validate:
from snet_cli.resources.proto.unixfs_pb2 import Data
from snet_cli.resources... |
Convert in and from bytes uri format used in Registry contract | def hash_to_bytesuri(s):
"""
Convert in and from bytes uri format used in Registry contract
"""
# TODO: we should pad string with zeros till closest 32 bytes word because of a bug in processReceipt (in snet_cli.contract.process_receipt)
s = "ipfs://" + s
return s.encode("ascii").ljust(32 * (len(... |
Tar files might be dangerous (see https://bugs.python.org/issue21109,
and https://docs.python.org/3/library/tarfile.html, TarFile.extractall warning)
we extract only simple files | def safe_extract_proto_from_ipfs(ipfs_client, ipfs_hash, protodir):
"""
Tar files might be dangerous (see https://bugs.python.org/issue21109,
and https://docs.python.org/3/library/tarfile.html, TarFile.extractall warning)
we extract only simple files
"""
spec_tar = get_from_ipfs_and_checkhash(ip... |
import protobuf and return stub and request class | def _get_stub_and_request_classes(self, service_name):
""" import protobuf and return stub and request class """
# Compile protobuf if needed
codegen_dir = Path.home().joinpath(".snet", "mpe_client", "control_service")
proto_dir = Path(__file__).absolute().parent.joinpath("resources", ... |
Safely run StartClaim for given channels | def _start_claim_channels(self, grpc_channel, channels_ids):
""" Safely run StartClaim for given channels """
unclaimed_payments = self._call_GetListUnclaimed(grpc_channel)
unclaimed_payments_dict = {p["channel_id"] : p for p in unclaimed_payments}
to_claim = []
for channel_id i... |
Claim all 'pending' payments in progress and after we claim given channels | def _claim_in_progress_and_claim_channels(self, grpc_channel, channels):
""" Claim all 'pending' payments in progress and after we claim given channels """
# first we get the list of all 'payments in progress' in case we 'lost' some payments.
payments = self._call_GetListInProgress(grpc_channel)... |
Create default configuration if config file does not exist | def create_default_config(self):
""" Create default configuration if config file does not exist """
# make config directory with the minimal possible permission
self._config_file.parent.mkdir(mode=0o700, exist_ok=True)
self["network.kovan"] = {"default_eth_rpc_endpoint": "https://kovan... |
Dynamic import of grpc-protobuf from given directory (proto_dir)
service_name should be provided only in the case of conflicting method names (two methods with the same name in difference services).
Return stub_class, request_class, response_class
! We need response_class only for json payload encoding ! | def import_protobuf_from_dir(proto_dir, method_name, service_name = None):
"""
Dynamic import of grpc-protobuf from given directory (proto_dir)
service_name should be provided only in the case of conflicting method names (two methods with the same name in difference services).
Return stub_class, request... |
helper function which try to import method from the given _pb2_grpc.py file
service_name should be provided only in case of name conflict
return (False, None) in case of failure
return (True, (stub_class, request_class, response_class)) in case of success | def _import_protobuf_from_file(grpc_pyfile, method_name, service_name = None):
"""
helper function which try to import method from the given _pb2_grpc.py file
service_name should be provided only in case of name conflict
return (False, None) in case of failure
return (True, (stub_class, request_cla... |
Switch payload encoding to JSON for GRPC call | def switch_to_json_payload_encoding(call_fn, response_class):
""" Switch payload encoding to JSON for GRPC call """
def json_serializer(*args, **kwargs):
return bytes(json_format.MessageToJson(args[0], True, preserving_proto_field_name=True), "utf-8")
def json_deserializer(*args, **kwargs):
... |
possible modifiers: file, b64encode, b64decode
format: modifier1@modifier2@...modifierN@k_final | def _transform_call_params(self, params):
"""
possible modifiers: file, b64encode, b64decode
format: modifier1@modifier2@...modifierN@k_final
"""
rez = {}
for k, v in params.items():
if isinstance(v, dict):
v = self._transform_call_... |
We get state of the channel (nonce, amount, unspent_amount)
We do it by securely combine information from the server and blockchain
https://github.com/singnet/wiki/blob/master/multiPartyEscrowContract/MultiPartyEscrow_stateless_client.md | def _get_channel_state_statelessly(self, grpc_channel, channel_id):
"""
We get state of the channel (nonce, amount, unspent_amount)
We do it by securely combine information from the server and blockchain
https://github.com/singnet/wiki/blob/master/multiPartyEscrowContract/MultiPartyEscro... |
Print balance of ETH, AGI, and MPE wallet | def print_agi_and_mpe_balances(self):
""" Print balance of ETH, AGI, and MPE wallet """
if (self.args.account):
account = self.args.account
else:
account = self.ident.address
eth_wei = self.w3.eth.getBalance(account)
agi_cogs = self.call_contract_command(... |
def publish_proto_in_ipfs(self):
    """Upload the .proto files from args.protodir to IPFS and print the base58 hash."""
    client = self._get_ipfs_client()
    self._printout(utils_ipfs.publish_proto_in_ipfs(client, self.args.protodir))
Publish protobuf model in ipfs and update existing metadata file | def publish_proto_metadata_update(self):
""" Publish protobuf model in ipfs and update existing metadata file """
metadata = load_mpe_service_metadata(self.args.metadata_file)
ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir)
metadata.set_si... |
Metadata: add endpoint to the group | def metadata_add_endpoints(self):
""" Metadata: add endpoint to the group """
metadata = load_mpe_service_metadata(self.args.metadata_file)
group_name = metadata.get_group_name_nonetrick(self.args.group_name)
for endpoint in self.args.endpoints:
metadata.add_endpoint(group_na... |
def metadata_remove_all_endpoints(self):
    """Strip every endpoint from every group in the metadata file and write it back."""
    metadata_path = self.args.metadata_file
    metadata = load_mpe_service_metadata(metadata_path)
    metadata.remove_all_endpoints()
    metadata.save_pretty(metadata_path)
Metadata: Remove all endpoints from the group and add new ones | def metadata_update_endpoints(self):
""" Metadata: Remove all endpoints from the group and add new ones """
metadata = load_mpe_service_metadata(self.args.metadata_file)
group_name = metadata.get_group_name_nonetrick(self.args.group_name)
metadata.remove_all_endpoints_for_group(group_nam... |
def _get_persistent_mpe_dir(self):
    """Return the persistent client-storage directory for this (MPE, Registry) pair.

    The directory lives under ~/.snet/mpe_client and is keyed by the lower-cased
    MPE and Registry contract addresses, so different deployments don't collide.
    """
    mpe = self.get_mpe_address().lower()
    registry = self.get_registry_address().lower()
    return Path.home() / ".snet" / "mpe_client" / ("%s_%s" % (mpe, registry))
def _get_initialized_channels_dict_for_service(self, org_id, service_id):
    """Return the locally persisted {channel_id: channel} mapping for (org_id, service_id).

    Reads the pickled channels-info file if it exists; returns an empty dict otherwise.
    """
    fn = self._get_channels_info_file(org_id, service_id)
    # Guard clause: no local cache file yet -> nothing initialized.
    if not os.path.isfile(fn):
        return {}
    # Fix: the original did pickle.load(open(fn, "rb")) and leaked the file handle;
    # a context manager guarantees the file is closed even if unpickling raises.
    # NOTE(review): unpickling is acceptable only because this file is written
    # locally by this tool itself, never received from the network.
    with open(fn, "rb") as f:
        return pickle.load(f)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.