def twofilter_smoothing(self, t, info, phi, loggamma, linear_cost=False,
                        return_ess=False, modif_forward=None,
                        modif_info=None):
    """Two-filter smoothing.

    Parameters
    ----------
    t: int
        time, in range 0 <= t < T-1
    info: SMC object
        the information filter
    phi: function
        test function, a function of (X_t, X_{t+1})
    loggamma: function
        a function of X_{t+1}
    linear_cost: bool
        if True, use the O(N) variant (basic version is O(N^2))
    return_ess: bool
        if True, the O(N) variant also returns the ESS of the importance
        weights
    modif_forward, modif_info: ndarrays, optional
        modifications of the forward (resp. information filter) log-weights;
        used only by the O(N) variant

    Returns
    -------
    Two-filter estimate of the smoothing expectation of phi(X_t, X_{t+1})
    """
    ti = self.T - 2 - t  # t+1 in reverse
    if t < 0 or t >= self.T - 1:
        raise ValueError(
            'two-filter smoothing: t must be in range 0,...,T-2')
    lwinfo = info.hist.wgt[ti].lw - loggamma(info.hist.X[ti])
    if linear_cost:
        return self._twofilter_smoothing_ON(t, ti, info, phi, lwinfo,
                                            return_ess, modif_forward,
                                            modif_info)
    else:
        return self._twofilter_smoothing_ON2(t, ti, info, phi, lwinfo)
def _twofilter_smoothing_ON2(self, t, ti, info, phi, lwinfo):
    """O(N^2) version of two-filter smoothing.

    This method should not be called directly, see twofilter_smoothing.
    """
    sp, sw = 0., 0.
    upb = lwinfo.max() + self.wgt[t].lw.max()
    if hasattr(self.model, 'upper_bound_trans'):
        upb += self.model.upper_bound_trans(t + 1)
    # Loop over n, to avoid having in memory a NxN matrix
    for n in range(self.N):
        omegan = np.exp(lwinfo + self.wgt[t].lw[n] - upb
                        + self.model.logpt(t + 1, self.X[t][n],
                                           info.hist.X[ti]))
        sp += np.sum(omegan * phi(self.X[t][n], info.hist.X[ti]))
        sw += np.sum(omegan)
    return sp / sw
def _twofilter_smoothing_ON(self, t, ti, info, phi, lwinfo, return_ess,
                            modif_forward, modif_info):
    """O(N) version of two-filter smoothing.

    This method should not be called directly, see twofilter_smoothing.
    """
    if modif_info is not None:
        lwinfo += modif_info
    Winfo = rs.exp_and_normalise(lwinfo)
    I = rs.multinomial(Winfo)
    if modif_forward is not None:
        lw = self.wgt[t].lw + modif_forward
        W = rs.exp_and_normalise(lw)
    else:
        W = self.wgt[t].W
    J = rs.multinomial(W)
    log_omega = self.model.logpt(t + 1, self.X[t][J], info.hist.X[ti][I])
    if modif_forward is not None:
        log_omega -= modif_forward[J]
    if modif_info is not None:
        log_omega -= modif_info[I]
    Om = rs.exp_and_normalise(log_omega)
    est = np.average(phi(self.X[t][J], info.hist.X[ti][I]),
                     axis=0, weights=Om)
    if return_ess:
        return (est, 1. / np.sum(Om ** 2))
    else:
        return est
def multiSMC(nruns=10, nprocs=0, out_func=None, **args):
    """Run SMC algorithms in parallel, for different combinations of parameters.

    `multiSMC` relies on the `multiplexer` utility, and obeys the same logic.
    A basic usage is::

        results = multiSMC(fk=my_fk_model, N=100, nruns=20, nprocs=0)

    This runs the same SMC algorithm 20 times, using all available CPU cores.
    The output, ``results``, is a list of 20 dictionaries; a given dict
    corresponds to a single run, and contains the following (key, value)
    pairs:

    + ``'run'``: a run identifier (a number between 0 and nruns-1)
    + ``'output'``: the corresponding SMC object (once method run was
      completed)

    Since a `SMC` object may take a lot of space in memory (especially when
    the option ``store_history`` is set to True), it is possible to require
    `multiSMC` to store only some chosen summary of the SMC runs, using
    option `out_func`. For instance, if we only want to store the estimate
    of the log-likelihood of the model obtained from each particle filter::

        of = lambda pf: pf.logLt
        results = multiSMC(fk=my_fk_model, N=100, nruns=20, out_func=of)

    It is also possible to vary the parameters. Say::

        results = multiSMC(fk=my_fk_model, N=[100, 500, 1000])

    will run the same SMC algorithm 30 times: 10 times for N=100, 10 times
    for N=500, and 10 times for N=1000. The number 10 comes from the fact
    that we did not specify nruns, and its default value is 10. The 30
    dictionaries obtained in results will then contain an extra (key, value)
    pair that gives the value of N for which the run was performed.

    It is possible to vary several arguments. Each time a list must be
    provided. The end result will amount to taking a *cartesian product* of
    the arguments::

        results = multiSMC(fk=my_fk_model, N=[100, 1000],
                           resampling=['multinomial', 'residual'], nruns=20)

    In that case we run our algorithm 80 times: 20 times with N=100 and
    resampling set to multinomial, 20 times with N=100 and resampling set to
    residual, and so on.

    Parameters
    ----------
    nruns: int, optional
        number of runs (default is 10)
    nprocs: int, optional
        number of processors to use; if negative, number of cores *not* to
        use; default is 0, i.e. use all available cores
    out_func: callable, optional
        function to transform the output of each SMC run (if not given, the
        output will be the complete SMC object)
    args: dict
        arguments passed to SMC class

    Returns
    -------
    A list of dicts

    See also
    --------
    `utils.multiplexer`: for more details on the syntax.
    """
    if out_func is None:
        out_func = lambda x: x

    def f(**args):
        pf = SMC(**args)
        pf.run()
        return out_func(pf)

    return utils.multiplexer(f=f, nruns=nruns, nprocs=nprocs, seeding=True,
                             **args)
def logpt(self, t, xp, x):
    """Log-density of X_t given X_{t-1}."""
    raise NotImplementedError(err_msg_missing_trans
                              % self.__class__.__name__)
def reset_weights(self):
    """Reset weights after a resampling step."""
    if self.fk.isAPF:
        lw = (rs.log_mean_exp(self.logetat, W=self.W)
              - self.logetat[self.A])
        self.wgts = rs.Weights(lw=lw)
    else:
        self.wgts = rs.Weights()
def setup_auxiliary_weights(self):
    """Compute auxiliary weights (for APF)."""
    if self.fk.isAPF:
        self.logetat = self.fk.logeta(self.t - 1, self.X)
        self.aux = self.wgts.add(self.logetat)
    else:
        self.aux = self.wgts
def exp_and_normalise(lw):
    """Exponentiate, then normalise (so that sum equals one).

    Arguments
    ---------
    lw: ndarray
        log weights.

    Returns
    -------
    W: ndarray of the same shape as lw
        W = exp(lw) / sum(exp(lw))

    Note
    ----
    uses the log_sum_exp trick to avoid overflow (i.e. subtracts the max
    before exponentiating)

    See also
    --------
    log_sum_exp
    log_mean_exp
    """
    w = np.exp(lw - lw.max())
    return w / w.sum()
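# A quick usage sketch for exp_and_normalise (assuming numpy is imported as
# np, as elsewhere in this module): log-weights around 1000 would overflow a
# naive np.exp, but subtracting the max first keeps everything finite.
lw_demo = np.array([1000., 1001., 999.])
W_demo = exp_and_normalise(lw_demo)
assert np.isclose(W_demo.sum(), 1.)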
def essl(lw):
    """ESS (Effective sample size) computed from log-weights.

    Parameters
    ----------
    lw: (N,) ndarray
        log-weights

    Returns
    -------
    float
        the ESS of weights w = exp(lw), i.e. the quantity
        (sum(w))**2 / sum(w**2)

    Note
    ----
    The ESS is a popular criterion to determine how *uneven* the weights
    are. Its value is in the range [1, N]; it equals N when the weights are
    constant, and 1 if all weights but one are zero.
    """
    w = np.exp(lw - lw.max())
    return (w.sum()) ** 2 / np.sum(w ** 2)
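# Sanity checks for essl (demo values chosen purely for illustration): the
# ESS of N equal log-weights is N, and it approaches 1 when a single weight
# dominates.
assert np.isclose(essl(np.zeros(100)), 100.)      # constant weights
assert essl(np.array([0., -50., -50.])) < 1.01    # one dominant weight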
def log_sum_exp(v):
    """Log of the sum of the exp of the arguments.

    Parameters
    ----------
    v: ndarray

    Returns
    -------
    l: float
        l = log(sum(exp(v)))

    Note
    ----
    uses the log_sum_exp trick to avoid overflow: i.e. we remove the max of
    v before exponentiating, then we add it back

    See also
    --------
    log_mean_exp
    """
    m = v.max()
    return m + np.log(np.sum(np.exp(v - m)))
def log_sum_exp_ab(a, b):
    """log_sum_exp for two scalars.

    Parameters
    ----------
    a, b: float

    Returns
    -------
    c: float
        c = log(e^a + e^b)
    """
    if a > b:
        return a + np.log(1. + np.exp(b - a))
    else:
        return b + np.log(1. + np.exp(a - b))
def log_mean_exp(v, W=None):
    """Log of the (weighted) mean of exp(v).

    Parameters
    ----------
    v: ndarray
        data, should be such that v.shape[0] = N
    W: (N,) ndarray, optional
        normalised weights (>= 0, sum to one)

    Returns
    -------
    ndarray
        mean (or weighted mean, if W is provided) of vector exp(v)

    See also
    --------
    log_sum_exp
    """
    m = v.max()
    V = np.exp(v - m)
    if W is None:
        return m + np.log(np.mean(V))
    else:
        return m + np.log(np.average(V, weights=W))
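# Example for log_mean_exp (values made up for illustration): averaging
# likelihood values whose exponentials would underflow to 0 in double
# precision.
v_demo = np.array([-1000., -1001., -1002.])
lm = log_mean_exp(v_demo)  # finite, approximately -1000.69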
def wmean_and_var(W, x):
    """Component-wise weighted mean and variance.

    Parameters
    ----------
    W: (N,) ndarray
        normalised weights (must be >= 0 and sum to one).
    x: ndarray (such that shape[0] == N)
        data

    Returns
    -------
    dictionary
        {'mean': weighted_means, 'var': weighted_variances}
    """
    m = np.average(x, weights=W, axis=0)
    m2 = np.average(x ** 2, weights=W, axis=0)
    v = m2 - m ** 2
    return {'mean': m, 'var': v}
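# Illustration of wmean_and_var: with uniform weights it reduces to the
# plain sample mean and (biased) sample variance. The demo arrays are
# arbitrary.
x_demo = np.random.randn(1000)
W_unif = np.full(1000, 1. / 1000)
out = wmean_and_var(W_unif, x_demo)
assert np.isclose(out['mean'], x_demo.mean())
assert np.isclose(out['var'], x_demo.var())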
def wmean_and_var_str_array(W, x):
    """Weighted mean and variance of each component of a structured array.

    Parameters
    ----------
    W: (N,) ndarray
        normalised weights (must be >= 0 and sum to one).
    x: (N,) structured array
        data

    Returns
    -------
    dictionary
        {'mean': weighted_means, 'var': weighted_variances}
    """
    m = np.empty(shape=x.shape[1:], dtype=x.dtype)
    v = np.empty_like(m)
    for p in x.dtype.names:
        m[p], v[p] = wmean_and_var(W, x[p]).values()
    return {'mean': m, 'var': v}
def wquantiles(W, x, alphas=(0.25, 0.50, 0.75)):
    """Quantiles for weighted data.

    Parameters
    ----------
    W: (N,) ndarray
        normalised weights (weights are >= 0 and sum to one)
    x: (N,) or (N, d) ndarray
        data
    alphas: list-like of size k (default: (0.25, 0.50, 0.75))
        probabilities (between 0. and 1.)

    Returns
    -------
    a (k,) or (d, k) ndarray containing the alpha-quantiles
    """
    if len(x.shape) == 1:
        return _wquantiles(W, x, alphas=alphas)
    elif len(x.shape) == 2:
        return np.array([_wquantiles(W, x[:, i], alphas=alphas)
                         for i in range(x.shape[1])])
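# Usage sketch for wquantiles: the weighted median of a toy sample. With the
# weights below, the weighted CDF crosses 0.5 at the third point, so the
# result should be close to 3 (the exact value depends on the interpolation
# rule of the helper _wquantiles, which is not shown here).
x_toy = np.array([1., 2., 3., 4.])
W_toy = np.array([0.1, 0.2, 0.3, 0.4])
med = wquantiles(W_toy, x_toy, alphas=(0.5,))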
def wquantiles_str_array(W, x, alphas=(0.25, 0.50, 0.75)):
    """Quantiles for weighted data stored in a structured array.

    Parameters
    ----------
    W: (N,) ndarray
        normalised weights (weights are >= 0 and sum to one)
    x: (N,) structured array
        data
    alphas: list-like of size k (default: (0.25, 0.50, 0.75))
        probabilities (between 0. and 1.)

    Returns
    -------
    dictionary {p: quantiles} that stores, for each field name p, the
    corresponding quantiles
    """
    return {p: wquantiles(W, x[p], alphas) for p in x.dtype.names}
def resampling_scheme(func):
    """Decorator for resampling schemes."""
    @functools.wraps(func)
    def modif_func(W, M=None):
        M = W.shape[0] if M is None else M
        return func(W, M)

    rs_funcs[func.__name__] = modif_func
    modif_func.__doc__ = rs_doc % func.__name__.capitalize()
    return modif_func
def inverse_cdf(su, W):
    """Inverse CDF algorithm for a finite distribution.

    Parameters
    ----------
    su: (M,) ndarray
        M sorted uniform variates (i.e. M ordered points in [0,1]).
    W: (N,) ndarray
        a vector of N normalized weights (>= 0 and sum to one)

    Returns
    -------
    A: (M,) ndarray
        a vector of M indices in range 0, ..., N-1
    """
    j = 0
    s = W[0]
    M = su.shape[0]
    A = np.empty(M, 'int')
    for n in range(M):
        while su[n] > s:
            j += 1
            s += W[j]
        A[n] = j
    return A
def uniform_spacings(N):
    """Generate ordered uniform variates in O(N) time.

    Parameters
    ----------
    N: int (>0)
        the expected number of uniform variates

    Returns
    -------
    (N,) float ndarray
        the N ordered variates (ascending order)

    Note
    ----
    This is equivalent to::

        from numpy import random
        u = sort(random.rand(N))

    but the line above has complexity O(N*log(N)), whereas the algorithm
    used here has complexity O(N).
    """
    z = np.cumsum(-np.log(random.rand(N + 1)))
    return z[:-1] / z[-1]
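# The two helpers above compose into O(N) multinomial resampling: draw N
# ordered uniforms in O(N), then invert the CDF of the weights. A minimal
# sketch (the library's actual multinomial scheme may differ in details):
def multinomial_draws(W):
    """Return W.shape[0] i.i.d. ancestor indices drawn from the weights W."""
    return inverse_cdf(uniform_spacings(W.shape[0]), W)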
def add(self, delta):
    """Increment weights: lw <- lw + delta.

    Parameters
    ----------
    delta: (N,) array
        incremental log-weights
    """
    if self.lw is None:
        return Weights(lw=delta)
    else:
        return Weights(lw=self.lw + delta)
def dequeue(self, k):
    """Outputs *k* draws from the multinomial distribution."""
    if self.j + k <= self.M:
        out = self.A[self.j:(self.j + k)]
        self.j += k
    elif k <= self.M:
        out = np.empty(k, 'int')
        nextra = self.j + k - self.M
        out[:(k - nextra)] = self.A[self.j:]
        self.enqueue()
        out[(k - nextra):] = self.A[:nextra]
        self.j = nextra
    else:
        raise ValueError('MultinomialQueue: k must be <= M (the max '
                         'capacity of the queue)')
    return out
def hilbert_array(xint):
    """Compute Hilbert indices.

    Parameters
    ----------
    xint: (N, d) int numpy.ndarray

    Returns
    -------
    h: (N,) int numpy.ndarray
        Hilbert indices
    """
    N, d = xint.shape
    h = np.zeros(N, np.int64)
    for n in range(N):
        h[n] = Hilbert_to_int(xint[n, :])
    return h
def hilbert_sort(x):
    """Hilbert sort: sort vectors according to their Hilbert index.

    Parameters
    ----------
    x: (N,) or (N, d) float numpy.ndarray
        N vectors in R^d

    Returns
    -------
    A: (N,) int numpy.ndarray
        argsort (e.g. x[A[0], :] is the vector with smallest Hilbert index).

    Note
    ----
    The vectors in `x` are first (a) standardized, then (b) sent through the
    logistic transformation, so as to lie in [0,1]^d.
    """
    d = 1 if x.ndim == 1 else x.shape[1]
    if d == 1:
        return np.argsort(x, axis=0)
    xs = invlogit((x - np.mean(x, axis=0)) / np.std(x, axis=0))
    maxint = np.floor(2 ** (62 / d))
    xint = np.floor(xs * maxint).astype('int')
    return np.argsort(hilbert_array(xint))
def mean_sq_jump_dist(self, discard_frac=0.1):
    """Mean squared jumping distance estimated from the chain.

    Parameters
    ----------
    discard_frac: float
        fraction of iterations to discard at the beginning (as a burn-in)

    Returns
    -------
    float
    """
    discard = int(self.niter * discard_frac)
    return msjd(self.chain.theta[discard:])
def update(self, v):
    """Adds point v."""
    self.t += 1
    g = self.gamma()
    self.mu = (1. - g) * self.mu + g * v
    mv = v - self.mu
    self.Sigma = ((1. - g) * self.Sigma
                  + g * np.dot(mv[:, np.newaxis], mv[np.newaxis, :]))
    try:
        self.L = cholesky(self.Sigma, lower=True)
    except LinAlgError:
        self.L = self.L0
def cartesian_lists(d):
    """Turns a dict of lists into a list of dicts that represents the
    cartesian product of the initial lists.

    Example
    -------
    cartesian_lists({'a': [0, 2], 'b': [3, 4, 5]})
    returns
    [{'a': 0, 'b': 3}, {'a': 0, 'b': 4}, ..., {'a': 2, 'b': 5}]
    """
    return [{k: v for k, v in zip(d.keys(), args)}
            for args in itertools.product(*d.values())]
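# Quick check of cartesian_lists on the docstring example:
combos = cartesian_lists({'a': [0, 2], 'b': [3, 4, 5]})
assert len(combos) == 6
assert combos[0] == {'a': 0, 'b': 3}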
def cartesian_args(args, listargs, dictargs):
    """Compute a list of inputs and outputs for a function with kw arguments.

    args: dict
        fixed arguments, e.g. {'x': 3} means x=3 for all inputs
    listargs: dict
        arguments specified as a list; the inputs are then the Cartesian
        product of these lists
    dictargs: dict
        same as above, except the keys will be used in the output
        (see module doc for more explanation)
    """
    ils = {k: [v, ] for k, v in args.items()}
    ils.update(listargs)
    ils.update({k: v.values() for k, v in dictargs.items()})
    ols = listargs.copy()
    ols.update({k: v.keys() for k, v in dictargs.items()})
    return cartesian_lists(ils), cartesian_lists(ols)
def worker(qin, qout, f):
    """Worker for multiprocessing.

    A worker repeatedly picks a dict of arguments from the input queue and
    computes f for this set of arguments, until the input queue is empty.
    """
    while not qin.empty():
        i, args = qin.get()
        qout.put((i, f(**args)))
def distribute_work(f, inputs, outputs=None, nprocs=1, out_key='output'):
    """For each input i (a dict) in list **inputs**, evaluate f(**i), using
    multiprocessing if nprocs > 1.

    The result has the same format as the inputs: a list of dicts, taken
    from outputs, and updated with f(**i). If outputs is None, it is set to
    inputs.
    """
    if outputs is None:
        outputs = [ip.copy() for ip in inputs]
    if nprocs <= 0:
        nprocs += multiprocessing.cpu_count()

    # no multiprocessing
    if nprocs <= 1:
        return [add_to_dict(op, f(**ip), key=out_key)
                for ip, op in zip(inputs, outputs)]

    # multiprocessing
    queue_in = multiprocessing.Queue()
    queue_out = multiprocessing.Queue()
    procs = [multiprocessing.Process(target=worker,
                                     args=(queue_in, queue_out, f))
             for _ in range(nprocs)]
    sent = [queue_in.put((i, args)) for i, args in enumerate(inputs)]
    [p.start() for p in procs]
    results = [queue_out.get() for _ in sent]
    for i, r in results:
        add_to_dict(outputs[i], r, key=out_key)
    [p.join() for p in procs]
    return outputs
def distinct_seeds(k):
    """Returns k distinct seeds for random number generation."""
    seeds = []
    for _ in range(k):
        while True:
            s = random.randint(2 ** 32 - 1)
            if s not in seeds:
                break
        seeds.append(s)
    return seeds
def multiplexer(f=None, nruns=1, nprocs=1, seeding=None, **args):
    """Evaluate a function for different parameters, optionally in parallel.

    Parameters
    ----------
    f: function
        function f to evaluate, must take only kw arguments as inputs
    nruns: int
        number of evaluations of f for each set of arguments
    nprocs: int
        if <= 0, set to the actual number of physical processors plus
        nprocs (i.e. -1 means the number of cpus on your machine minus one);
        default is 1, which means no multiprocessing
    seeding: bool (default: True if nruns > 1, False otherwise)
        whether we need to provide different seeds for RNGs
    **args:
        keyword arguments for function f.

    Note
    ----
    see documentation of `utils`
    """
    if not callable(f):
        raise ValueError('multiplexer: function f missing, or not callable')
    if seeding is None:
        seeding = (nruns > 1)
    # sort the extra arguments (meant to be arguments for f)
    fixedargs, listargs, dictargs = {}, {}, {}
    listargs['run'] = list(range(nruns))
    for k, v in args.items():
        if isinstance(v, list):
            listargs[k] = v
        elif isinstance(v, dict):
            dictargs[k] = v
        else:
            fixedargs[k] = v
    # cartesian product
    inputs, outputs = cartesian_args(fixedargs, listargs, dictargs)
    for ip in inputs:
        ip.pop('run')  # run is not an argument of f, just an id for output
    # distribute different seeds
    if seeding:
        seeds = distinct_seeds(len(inputs))
        for ip, op, s in zip(inputs, outputs, seeds):
            ip['seed'] = s
            op['seed'] = s
    # the actual work happens here
    return distribute_work(f, inputs, outputs, nprocs=nprocs)
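# Hedged usage sketch for multiplexer; f_demo and its arguments are made up
# for illustration. seeding is disabled so that f_demo does not need to
# accept a seed keyword argument.
def f_demo(x, y):
    return x + y

results = multiplexer(f=f_demo, x=[1, 2], y=10, nruns=3, seeding=False)
# 2 values of x times 3 runs = 6 dicts, each with keys 'run', 'x', 'output'
assert len(results) == 6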
def reject_sv(m, s, y):
    """Sample from N(m, s^2) times the SV likelihood using rejection.

    The SV likelihood (in x) corresponds to y ~ N(0, exp(x)).
    """
    mp = m + 0.5 * s ** 2 * (-1. + y ** 2 * np.exp(-m))
    ntries = 0
    while True:
        ntries += 1
        x = stats.norm.rvs(loc=mp, scale=s)
        u = stats.uniform.rvs()
        if np.log(u) < -0.5 * y ** 2 * (np.exp(-x)
                                        - np.exp(-m) * (1. + m - x)):
            break
        if ntries > 1000:
            print('1000 failed attempts, m, s, y = %f, %f, %f' % (m, s, y))
            break
    return x
def predmean(self, xp):
    """Expectation of X_t given X_{t-1} = xp."""
    out = np.empty_like(xp)
    for i in range(3):
        out[:, i] = xp[:, i] + self.delta * xp[:, i + 3]  # position
        out[:, i + 3] = xp[:, i + 3]  # velocity
    return out
def diff_ft(self, xt, yt):
    """First and second derivatives (wrt x_t) of the log-density of
    Y_t | X_t = xt.
    """
    a, b = self.a, self.b
    ex = np.exp(a + np.matmul(b, xt))  # shape = (dy,)
    grad = (-np.sum(ex[:, np.newaxis] * b, axis=0)
            + np.sum(yt.flatten()[:, np.newaxis] * b, axis=0))  # TODO flatten
    hess = np.zeros((self.dx, self.dx))
    for k in range(self.dy):
        hess -= ex[k] * np.outer(b[k, :], b[k, :])
    return grad, hess
def approx_likelihood(self, yt):
    """Gaussian approximation of x_t --> f_t(y_t | x_t)."""
    x = np.zeros(self.dx)
    max_nb_attempts = 100
    tol = 1e-3
    for i in range(max_nb_attempts):
        grad, hess = self.diff_ft(x, yt)
        xnew = x - linalg.solve(hess, grad)  # Newton-Raphson
        if linalg.norm(xnew - x, ord=1) < tol:
            break
        x = xnew
    return (x, -hess)
def approx_post(self, xp, yt):
    """Approximates the law of X_t | Y_t, X_{t-1}.

    Returns a tuple of size 3: loc, cov, logpyt.
    """
    xmax, Q = self.approx_likelihood(yt)
    G = np.eye(self.dx)
    covY = linalg.inv(Q)
    pred = kalman.MeanAndCov(mean=self.predmean(xp), cov=self.SigX)
    return kalman.filter_step(G, covY, pred, xmax)
def simulate(self, T):
    """Simulate state and observation processes.

    Parameters
    ----------
    T: int
        processes are simulated from time 0 to time T-1

    Returns
    -------
    x, y: lists
        lists of length T
    """
    x = []
    for t in range(T):
        law_x = self.PX0() if t == 0 else self.PX(t, x[-1])
        x.append(law_x.rvs(size=1))
    y = self.simulate_given_x(x)
    return x, y
def logpt(self, t, xp, x):
    """Log-density of X_t given X_{t-1} = xp."""
    return self.ssm.PX(t, xp).logpdf(x)
def EXt(self, xp):
    """Compute E[X_t | X_{t-1} = xp]."""
    return (1. - self.rho) * self.mu + self.rho * xp
def posterior(self, x, sigma=1.):
    """Model is X_1, ..., X_n ~ N(theta, sigma^2); theta ~ self, sigma fixed."""
    pr0 = 1. / self.scale ** 2  # prior precision
    prd = x.size / sigma ** 2  # data precision
    varp = 1. / (pr0 + prd)  # posterior variance
    mu = varp * (pr0 * self.loc + prd * x.mean())
    return Normal(loc=mu, scale=np.sqrt(varp))
def posterior(self, x):
    """Model is X_1, ..., X_n ~ N(0, 1/theta); theta ~ Gamma(a, b)."""
    return Gamma(a=self.a + 0.5 * x.size, b=self.b + 0.5 * np.sum(x ** 2))
def posterior(self, x, s=1.):
    """Model is X_1, ..., X_n ~ N(theta, s^2); theta ~ self, s fixed."""
    pr0 = 1. / self.sigma ** 2  # prior precision
    prd = x.size / s ** 2  # data precision
    varp = 1. / (pr0 + prd)  # posterior variance
    mu = varp * (pr0 * self.mu + prd * x.mean())
    return TruncNormal(mu=mu, sigma=np.sqrt(varp), a=self.a, b=self.b)
def ppf(self, u):
    """Note: if dim(u) < self.dim, the remaining columns are filled with 0.

    Useful in case the distribution is partly degenerate.
    """
    N, du = u.shape
    if du < self.dim:
        z = np.zeros((N, self.dim))
        z[:, :du] = stats.norm.ppf(u)
    else:
        z = stats.norm.ppf(u)
    return self.linear_transform(z)
def posterior(self, x, Sigma=None):
    """Posterior for model: X_1, ..., X_n ~ N(theta, Sigma).

    Parameters
    ----------
    x: (n, d) ndarray
        data
    Sigma: (d, d) ndarray
        (fixed) covariance matrix in the model
    """
    n = x.shape[0]
    Sigma = np.eye(self.dim) if Sigma is None else Sigma
    Siginv = inv(Sigma)
    Qpost = inv(self.cov) + n * Siginv
    Sigpost = inv(Qpost)
    # standard conjugate formula: the posterior mean is the posterior
    # covariance times (prior precision * prior mean + Siginv * sum of data)
    mupost = np.matmul(Sigpost,
                       np.matmul(inv(self.cov), self.mean)
                       + np.matmul(Siginv, np.sum(x, axis=0)))
    return MvNormal(loc=mupost, cov=Sigpost)
def psit(t, xp, x):
    """Score of the model (gradient of the log-likelihood at theta=theta_0)."""
    if t == 0:
        return (-0.5 / sigma0 ** 2
                + (0.5 * (1. - phi0 ** 2) / sigma0 ** 4) * (x - mu0) ** 2)
    else:
        return (-0.5 / sigma0 ** 2
                + (0.5 / sigma0 ** 4) * ((x - mu0) - phi0 * (xp - mu0)) ** 2)
def avg_n_nplusone(x):
    """Returns x[0]/2, (x[0]+x[1])/2, ..., (x[-2]+x[-1])/2, x[-1]/2."""
    y = np.zeros(1 + x.shape[0])
    hx = 0.5 * x
    y[:-1] = hx
    y[1:] += hx
    return y
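# Tiny numeric check of avg_n_nplusone:
# [1, 3] -> [1/2, (1+3)/2, 3/2] = [0.5, 2.0, 1.5].
assert np.allclose(avg_n_nplusone(np.array([1., 3.])), [0.5, 2.0, 1.5])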
def interpoled_resampling(W, x):
    """Resampling based on an interpolated CDF, as described in Malik and
    Pitt.

    Parameters
    ----------
    W: (N,) array
        weights
    x: (N,) array
        particles

    Returns
    -------
    xrs: (N,) array
        the resampled particles
    """
    N = W.shape[0]
    idx = np.argsort(x)
    xs = x[idx]
    ws = W[idx]
    cs = np.cumsum(avg_n_nplusone(ws))
    u = random.rand(N)
    xrs = np.empty(N)
    # costs O(N log(N)), but the algorithm has O(N log(N)) complexity anyway
    where = np.searchsorted(cs, u)
    for n in range(N):
        m = where[n]
        if m == 0:
            xrs[n] = xs[0]
        elif m == N:
            xrs[n] = xs[-1]
        else:
            xrs[n] = interpol(cs[m - 1], cs[m], xs[m - 1], xs[m], u[n])
    return xrs
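# interpoled_resampling relies on a helper `interpol` that is not shown
# above. A plausible minimal version (an assumption, not necessarily the
# library's exact code) linearly interpolates between two consecutive knots
# of the interpolated CDF:
def interpol(c_lo, c_hi, x_lo, x_hi, u):
    """Linear interpolation: map u in [c_lo, c_hi] to [x_lo, x_hi]."""
    return x_lo + (x_hi - x_lo) * (u - c_lo) / (c_hi - c_lo)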
def line_to_variables(source, line, inherit_permission, parent):
    """Returns a list of variables declared in the provided line of code.

    The line of code should be provided as a string.
    """
    vartype, kind, strlen, proto, rest = parse_type(line, parent.strings,
                                                    parent.settings)
    attribs = []
    intent = ""
    optional = False
    permission = inherit_permission
    parameter = False

    attribmatch = ATTRIBSPLIT_RE.match(rest)
    if attribmatch:
        attribstr = attribmatch.group(1).strip()
        declarestr = attribmatch.group(2).strip()
        tmp_attribs = ford.utils.paren_split(",", attribstr)
        for i in range(len(tmp_attribs)):
            tmp_attribs[i] = tmp_attribs[i].strip()
            if tmp_attribs[i].lower() == "public":
                permission = "public"
            elif tmp_attribs[i].lower() == "private":
                permission = "private"
            elif tmp_attribs[i].lower() == "protected":
                permission = "protected"
            elif tmp_attribs[i].lower() == "optional":
                optional = True
            elif tmp_attribs[i].lower() == "parameter":
                parameter = True
            elif tmp_attribs[i].lower().replace(' ', '') == "intent(in)":
                intent = 'in'
            elif tmp_attribs[i].lower().replace(' ', '') == "intent(out)":
                intent = 'out'
            elif tmp_attribs[i].lower().replace(' ', '') == "intent(inout)":
                intent = 'inout'
            else:
                attribs.append(tmp_attribs[i])
    else:
        declarestr = ATTRIBSPLIT2_RE.match(rest).group(2)
    declarations = ford.utils.paren_split(",", declarestr)

    varlist = []
    for dec in declarations:
        dec = re.sub(" ", "", dec)
        split = ford.utils.paren_split('=', dec)
        if len(split) > 1:
            name = split[0]
            if split[1][0] == '>':
                initial = split[1][1:]
                points = True
            else:
                initial = split[1]
                points = False
        else:
            name = dec.strip()
            initial = None
            points = False
        if initial:
            initial = COMMA_RE.sub(', ', initial)
            search_from = 0
            while QUOTES_RE.search(initial[search_from:]):
                num = int(QUOTES_RE.search(
                    initial[search_from:]).group()[1:-1])
                old_string = NBSP_RE.sub('&nbsp;', parent.strings[num])
                string = ''
                for i in range(len(old_string)):
                    if (old_string[i] == "\\"
                            and (old_string[i + 1] in '0123456789'
                                 or old_string[i + 1] == 'g')):
                        string += r'\\'
                    elif old_string[i] == '(' and old_string[i + 1] == '?':
                        string += r'\('
                    else:
                        string += old_string[i]
                initial = (initial[0:search_from]
                           + QUOTES_RE.sub(string, initial[search_from:],
                                           count=1))
                search_from += QUOTES_RE.search(
                    initial[search_from:]).end(0)
        if proto:
            varlist.append(FortranVariable(name, vartype, parent,
                                           copy.copy(attribs), intent,
                                           optional, permission, parameter,
                                           kind, strlen, list(proto),
                                           [], points, initial))
        else:
            varlist.append(FortranVariable(name, vartype, parent,
                                           copy.copy(attribs), intent,
                                           optional, permission, parameter,
                                           kind, strlen, proto,
                                           [], points, initial))

    doc = []
    docline = next(source)
    while docline[0:2] == "!" + parent.settings['docmark']:
        doc.append(docline[2:])
        docline = next(source)
    source.pass_back(docline)
    for var in varlist:
        var.doc = doc
    return varlist
def parse_type(string, capture_strings, settings):
    """Gets variable type, kind, length, and/or derived-type attributes from
    a variable declaration.
    """
    typestr = ''
    for vtype in settings['extra_vartypes']:
        typestr = typestr + '|' + vtype
    var_type_re = re.compile(VAR_TYPE_STRING + typestr, re.IGNORECASE)
    match = var_type_re.match(string)
    if not match:
        raise Exception("Invalid variable declaration: {}".format(string))

    vartype = match.group().lower()
    if DOUBLE_PREC_RE.match(vartype):
        vartype = "double precision"
    rest = string[match.end():].strip()
    kindstr = ford.utils.get_parens(rest)
    rest = rest[len(kindstr):].strip()
    if (len(kindstr) < 3 and vartype != "type" and vartype != "class"
            and not kindstr.startswith('*')):
        return (vartype, None, None, None, rest)

    match = VARKIND_RE.search(kindstr)
    if match:
        if match.group(1):
            star = False
            args = match.group(1).strip()
        else:
            star = True
            args = match.group(2).strip()
            if args.startswith('('):
                args = args[1:-1].strip()
        args = re.sub(r"\s", "", args)
        if vartype in ("type", "class", "procedure"):
            PROTO_RE = re.compile(r"(\*|\w+)\s*(?:\((.*)\))?")
            try:
                proto = list(PROTO_RE.match(args).groups())
                if not proto[1]:
                    proto[1] = ''
            except Exception:
                raise Exception("Bad type, class, or procedure prototype "
                                "specification: {}".format(args))
            return (vartype, None, None, proto, rest)
        elif vartype == "character":
            if star:
                return (vartype, None, args, None, rest)
            else:
                kind = None
                length = None
                if KIND_RE.search(args):
                    kind = KIND_RE.sub("", args)
                    try:
                        match = QUOTES_RE.search(kind)
                        num = int(match.group()[1:-1])
                        kind = QUOTES_RE.sub(capture_strings[num], kind)
                    except Exception:
                        pass
                elif LEN_RE.search(args):
                    length = LEN_RE.sub("", args)
                else:
                    length = args
                return (vartype, kind, length, None, rest)
        else:
            kind = KIND_RE.sub("", args)
            return (vartype, kind, None, None, rest)

    raise Exception("Bad declaration of variable type {}: {}".format(
        vartype, string))
def sort_items(self, items, args=False):
    """Sort `self`'s contents, as contained in the list `items`, as
    specified in `self`'s meta-data.
    """
    if self.settings['sort'].lower() == 'src':
        return

    def alpha(i):
        return i.name

    def permission(i):
        if args:
            if i.intent == 'in':
                return 'b'
            if i.intent == 'inout':
                return 'c'
            if i.intent == 'out':
                return 'd'
            if i.intent == '':
                return 'e'
        perm = getattr(i, 'permission', '')
        if perm == 'public':
            return 'b'
        if perm == 'protected':
            return 'c'
        if perm == 'private':
            return 'd'
        return 'a'

    def permission_alpha(i):
        return permission(i) + '-' + i.name

    def itype(i):
        if i.obj == 'variable':
            retstr = i.vartype
            if retstr == 'class':
                retstr = 'type'
            if i.kind:
                retstr = retstr + '-' + str(i.kind)
            if i.strlen:
                retstr = retstr + '-' + str(i.strlen)
            if i.proto:
                retstr = retstr + '-' + i.proto[0]
            return retstr
        elif i.obj == 'proc':
            if i.proctype != 'Function':
                return i.proctype.lower()
            else:
                return i.proctype.lower() + '-' + itype(i.retvar)
        else:
            return i.obj

    def itype_alpha(i):
        return itype(i) + '-' + i.name

    if self.settings['sort'].lower() == 'alpha':
        items.sort(key=alpha)
    elif self.settings['sort'].lower() == 'permission':
        items.sort(key=permission)
    elif self.settings['sort'].lower() == 'permission-alpha':
        items.sort(key=permission_alpha)
    elif self.settings['sort'].lower() == 'type':
        items.sort(key=itype)
    elif self.settings['sort'].lower() == 'type-alpha':
        items.sort(key=itype_alpha)
def contents_size(self):
    '''
    Returns the number of different categories to be shown in the contents
    side-bar in the HTML documentation.
    '''
    categories = ('variables', 'types', 'modules', 'submodules',
                  'subroutines', 'modprocedures', 'functions', 'interfaces',
                  'absinterfaces', 'programs', 'boundprocs', 'finalprocs',
                  'enums', 'procedure', 'constructor', 'modfunctions',
                  'modsubroutines', 'modprocs')
    count = sum(1 for cat in categories if hasattr(self, cat))
    if getattr(self, 'src', None):
        count += 1
    return count
def markdown(self, md, project):
    """Process the documentation with Markdown to produce HTML."""
    if len(self.doc) > 0:
        if len(self.doc) == 1 and ':' in self.doc[0]:
            words = self.doc[0].split(':')[0].strip()
            if words.lower() not in ['author', 'date', 'license', 'version',
                                     'category', 'summary', 'deprecated',
                                     'display', 'graph']:
                self.doc.insert(0, '')
                self.doc.append('')
        self.doc = '\n'.join(self.doc)
        self.doc = md.convert(self.doc)
        self.meta = md.Meta
        md.reset()
        md.Meta = {}
    else:
        if (self.settings['warn'].lower() == 'true'
                and self.obj != 'sourcefile'
                and self.obj != 'genericsource'):
            # TODO: Add ability to print line number where this item is in
            # file
            print('Warning: Undocumented {} {} in file {}'.format(
                self.obj, self.name, self.hierarchy[0].name))
        self.doc = ""
        self.meta = {}

    if self.parent:
        self.display = self.parent.display

    for key in self.meta:
        if key == 'display':
            tmp = [item.lower() for item in self.meta[key]]
            if type(self) == FortranSourceFile:
                while 'none' in tmp:
                    tmp.remove('none')
            if len(tmp) == 0:
                pass
            elif 'none' in tmp:
                self.display = []
            elif ('public' not in tmp and 'private' not in tmp
                  and 'protected' not in tmp):
                pass
            else:
                self.display = tmp
        elif len(self.meta[key]) == 1:
            self.meta[key] = self.meta[key][0]
        elif key == 'summary':
            self.meta[key] = '\n'.join(self.meta[key])

    if hasattr(self, 'num_lines'):
        self.meta['num_lines'] = self.num_lines

    self.doc = ford.utils.sub_macros(ford.utils.sub_notes(self.doc),
                                     self.base_url)

    if 'summary' in self.meta:
        self.meta['summary'] = md.convert(self.meta['summary'])
        self.meta['summary'] = ford.utils.sub_macros(
            ford.utils.sub_notes(self.meta['summary']), self.base_url)
    elif PARA_CAPTURE_RE.search(self.doc):
        if self.get_url() is None:
            # There is no stand-alone webpage for this item (e.g., an
            # internal routine in a routine), so make the whole doc blob
            # appear, without the link to "more..."
            self.meta['summary'] = self.doc
        else:
            self.meta['summary'] = PARA_CAPTURE_RE.search(self.doc).group()
    else:
        self.meta['summary'] = ''
    if self.meta['summary'].strip() != self.doc.strip():
        self.meta['summary'] += ('<a href="{}" class="pull-right">'
                                 '<emph>Read more&hellip;</emph></a>'
                                 .format(self.get_url()))

    if 'graph' not in self.meta:
        self.meta['graph'] = self.settings['graph']
    else:
        self.meta['graph'] = self.meta['graph'].lower()
    if 'graph_maxdepth' not in self.meta:
        self.meta['graph_maxdepth'] = self.settings['graph_maxdepth']
    if 'graph_maxnodes' not in self.meta:
        self.meta['graph_maxnodes'] = self.settings['graph_maxnodes']

    if self.obj == 'proc' or self.obj == 'type' or self.obj == 'program':
        if 'source' not in self.meta:
            self.meta['source'] = self.settings['source'].lower()
        else:
            self.meta['source'] = self.meta['source'].lower()
        if self.meta['source'] == 'true':
            if self.obj == 'proc':
                obj = self.proctype.lower()
            else:
                obj = self.obj
            regex = re.compile(self.SRC_CAPTURE_STR.format(obj, self.name),
                               re.IGNORECASE | re.DOTALL | re.MULTILINE)
            match = regex.search(self.hierarchy[0].raw_src)
            if match:
                self.src = highlight(match.group(), FortranLexer(),
                                     HtmlFormatter())
            else:
                self.src = ''
                if self.settings['warn'].lower() == 'true':
                    print('Warning: Could not extract source code for {} {} '
                          'in file {}'.format(self.obj, self.name,
                                              self.hierarchy[0].name))

    if self.obj == 'proc':
        if 'proc_internals' not in self.meta:
            self.meta['proc_internals'] = \
                self.settings['proc_internals'].lower()
        else:
            self.meta['proc_internals'] = self.meta['proc_internals'].lower()

    # Process Markdown for all child entities
    for item in self.iterator('variables', 'modules', 'submodules', 'common',
                              'subroutines', 'modprocedures', 'functions',
                              'interfaces', 'absinterfaces', 'types',
                              'programs', 'blockdata', 'boundprocs',
                              'finalprocs', 'args', 'enums'):
        if isinstance(item, FortranBase):
            item.markdown(md, project)
    if hasattr(self, 'retvar'):
        if self.retvar:
            if isinstance(self.retvar, FortranBase):
                self.retvar.markdown(md, project)
    if hasattr(self, 'procedure'):
        if isinstance(self.procedure, FortranBase):
            self.procedure.markdown(md, project)
    return
def sort(self):
    '''
    Sorts components of the object.
    '''
    for attr in ('variables', 'modules', 'submodules', 'common',
                 'subroutines', 'modprocedures', 'functions', 'interfaces',
                 'absinterfaces', 'types', 'programs', 'blockdata',
                 'boundprocs', 'finalprocs'):
        if hasattr(self, attr):
            sort_items(self, getattr(self, attr))
    if hasattr(self, 'args'):
        # sort_items(self, self.args, args=True)
        pass
def make_links(self, project):
    """Process intra-site links to documentation of other parts of the
    program.
    """
    self.doc = ford.utils.sub_links(self.doc, project)
    if 'summary' in self.meta:
        self.meta['summary'] = ford.utils.sub_links(self.meta['summary'],
                                                    project)
    # Create links in the project
    for item in self.iterator('variables', 'types', 'enums', 'modules',
                              'submodules', 'subroutines', 'functions',
                              'interfaces', 'absinterfaces', 'programs',
                              'boundprocs', 'args', 'bindings'):
        if isinstance(item, FortranBase):
            item.make_links(project)
    if hasattr(self, 'retvar'):
        if self.retvar:
            if isinstance(self.retvar, FortranBase):
                self.retvar.make_links(project)
    if hasattr(self, 'procedure'):
        if isinstance(self.procedure, FortranBase):
            self.procedure.make_links(project)
def iterator(self, *argv):
    """Iterator returning any list of elements via attribute lookup in
    `self`.

    This iterator retains the order of the arguments.
    """
    for arg in argv:
        if hasattr(self, arg):
            for item in getattr(self, arg):
                yield item
def prune(self):
    """Remove anything which shouldn't be displayed."""

    def to_include(obj):
        inc = obj.permission in self.display
        if self.settings['hide_undoc'].lower() == 'true' and not obj.doc:
            inc = False
        return inc

    if self.obj == 'proc' and self.meta['proc_internals'] == 'false':
        self.functions = []
        self.subroutines = []
        self.types = []
        self.interfaces = []
        self.absinterfaces = []
        self.variables = []
    else:
        self.functions = [obj for obj in self.functions if to_include(obj)]
        self.subroutines = [obj for obj in self.subroutines
                            if to_include(obj)]
        self.types = [obj for obj in self.types if to_include(obj)]
        self.interfaces = [obj for obj in self.interfaces
                           if to_include(obj)]
        self.absinterfaces = [obj for obj in self.absinterfaces
                              if to_include(obj)]
        self.variables = [obj for obj in self.variables if to_include(obj)]
        if hasattr(self, 'modprocedures'):
            self.modprocedures = [obj for obj in self.modprocedures
                                  if to_include(obj)]
        if hasattr(self, 'modsubroutines'):
            self.modsubroutines = [obj for obj in self.modsubroutines
                                   if to_include(obj)]
        if hasattr(self, 'modfunctions'):
            self.modfunctions = [obj for obj in self.modfunctions
                                 if to_include(obj)]
    # Recurse
    for obj in self.absinterfaces:
        obj.visible = True
    for obj in self.iterator('functions', 'subroutines', 'types',
                             'interfaces', 'modprocedures', 'modfunctions',
                             'modsubroutines'):
        obj.visible = True
    for obj in self.iterator('functions', 'subroutines', 'types',
                             'modprocedures', 'modfunctions',
                             'modsubroutines'):
        obj.prune()
def get_used_entities(self, use_specs):
    """Returns the entities which are imported by a use statement. These are
    contained in dicts.
    """
    if len(use_specs.strip()) == 0:
        return (self.pub_procs, self.pub_absints, self.pub_types,
                self.pub_vars)
    only = bool(self.ONLY_RE.match(use_specs))
    use_specs = self.ONLY_RE.sub('', use_specs)
    ulist = self.SPLIT_RE.split(use_specs)
    ulist[-1] = ulist[-1].strip()
    uspecs = {}
    for item in ulist:
        match = self.RENAME_RE.search(item)
        if match:
            uspecs[match.group(1).lower()] = match.group(2)
        else:
            uspecs[item.lower()] = item

    def used(pub_dict):
        # With an ONLY clause, keep just the listed names; otherwise import
        # every public entity.
        return {name.lower(): obj for name, obj in pub_dict.items()
                if not only or name.lower() in uspecs}

    return (used(self.pub_procs), used(self.pub_absints),
            used(self.pub_types), used(self.pub_vars))
def prune(self):
    """Remove anything which shouldn't be displayed."""
    self.boundprocs = [obj for obj in self.boundprocs
                       if obj.permission in self.display]
    self.variables = [obj for obj in self.variables
                      if obj.permission in self.display]
    for obj in self.boundprocs + self.variables:
        obj.visible = True
def get_name(self, item):
    """Return the name for this item registered with this NameSelector.

    If no name has previously been registered, then generate a new one.
    """
    if not isinstance(item, ford.sourceform.FortranBase):
        raise Exception('{} is not of a type derived from '
                        'FortranBase'.format(str(item)))
    if item in self._items:
        return self._items[item]
    else:
        if item.get_dir() not in self._counts:
            self._counts[item.get_dir()] = {}
        if item.name in self._counts[item.get_dir()]:
            num = self._counts[item.get_dir()][item.name] + 1
        else:
            num = 1
        self._counts[item.get_dir()][item.name] = num
        name = item.name.lower().replace('<', 'lt')
        name = name.replace('>', 'gt')
        name = name.replace('/', 'SLASH')
        if name == '':
            name = '__unnamed__'
        if num > 1:
            name = name + '~' + str(num)
        self._items[item] = name
        return name
def initialize():
    """Parse and check FORD's configuration, get the project's global
    documentation, and create the Markdown reader.
    """
    try:
        import multiprocessing
        ncpus = '{0}'.format(multiprocessing.cpu_count())
    except (ImportError, NotImplementedError):
        ncpus = '0'

    # Set up the command-line options and parse them.
    parser = argparse.ArgumentParser(
        description="Document a program or library written in modern "
                    "Fortran. Any command-line options over-ride those "
                    "specified in the project file.")
    parser.add_argument("project_file",
                        help="file containing the description and settings "
                             "for the project",
                        type=argparse.FileType('r'))
    parser.add_argument("-d", "--src_dir", action="append",
                        help="directories containing all source files for "
                             "the project")
    parser.add_argument("-p", "--page_dir",
                        help="directory containing the optional page tree "
                             "describing the project")
    parser.add_argument("-o", "--output_dir",
                        help="directory in which to place output files")
    parser.add_argument("-s", "--css",
                        help="custom style-sheet for the output")
    parser.add_argument("-r", "--revision", dest="revision",
                        help="source code revision of the project to "
                             "document")
    parser.add_argument("--exclude", action="append",
                        help="any files which should not be included in "
                             "the documentation")
    parser.add_argument("--exclude_dir", action="append",
                        help="any directories whose contents should not be "
                             "included in the documentation")
    parser.add_argument("-e", "--extensions", action="append",
                        help="extensions which should be scanned for "
                             "documentation (default: f90, f95, f03, f08)")
    parser.add_argument("-m", "--macro", action="append",
                        help="preprocessor macro (and, optionally, its "
                             "value) to be applied to files in need of "
                             "preprocessing")
    parser.add_argument("-w", "--warn", dest='warn', action='store_true',
                        help="display warnings for undocumented items")
    parser.add_argument("--no-search", dest='search', action='store_false',
                        help="don't process documentation to produce a "
                             "search feature")
    parser.add_argument("-q", "--quiet", dest='quiet', action='store_true',
                        help="do not print any description of progress")
    parser.add_argument("-V", "--version", action="version",
                        version="{}, version {}".format(__appname__,
                                                        __version__))
    parser.add_argument("--debug", dest="dbg", action="store_true",
                        help="display traceback if fatal exception occurs")
    parser.add_argument("-I", "--include", action="append",
                        help="any directories which should be searched for "
                             "include files")
    # Get options from command-line
    args = parser.parse_args()

    # Set up the Markdown reader
    md_ext = ['markdown.extensions.meta', 'markdown.extensions.codehilite',
              'markdown.extensions.extra', MathExtension(),
              'md_environ.environ']
    md = markdown.Markdown(extensions=md_ext, output_format="html5",
                           extension_configs={})

    # Read in the project-file. This will contain global documentation
    # (which will appear on the homepage) as well as any information about
    # the project and settings for generating the documentation.
    proj_docs = args.project_file.read()
    md.convert(proj_docs)
    # Remake the Markdown object with settings parsed from the project_file
    if 'md_base_dir' in md.Meta:
        md_base = md.Meta['md_base_dir'][0]
    else:
        md_base = os.path.dirname(args.project_file.name)
    md_ext.append('markdown_include.include')
    if 'md_extensions' in md.Meta:
        md_ext.extend(md.Meta['md_extensions'])
    md = markdown.Markdown(
        extensions=md_ext, output_format="html5",
        extension_configs={'markdown_include.include':
                           {'base_path': md_base}})
    md.reset()
    # Re-read the project file
    proj_docs = md.convert(proj_docs)
    proj_data = md.Meta
    md.reset()

    # Get the default options, and any over-rides, straightened out
    options = ['src_dir', 'extensions', 'fpp_extensions', 'fixed_extensions',
               'output_dir', 'css', 'exclude', 'project', 'author',
               'author_description', 'author_pic', 'summary', 'github',
               'bitbucket', 'facebook', 'twitter', 'google_plus', 'linkedin',
               'email', 'website', 'project_github', 'project_bitbucket',
               'project_website', 'project_download',
               'project_sourceforge', 'project_url', 'display',
               'hide_undoc', 'version', 'year', 'docmark', 'predocmark',
               'docmark_alt', 'predocmark_alt', 'media_dir', 'favicon',
               'warn', 'extra_vartypes', 'page_dir', 'incl_src', 'source',
               'exclude_dir', 'macro', 'include', 'preprocess', 'quiet',
               'search', 'lower', 'sort', 'extra_mods', 'dbg', 'graph',
               'graph_maxdepth', 'graph_maxnodes', 'license',
               'extra_filetypes', 'preprocessor', 'creation_date',
               'print_creation_date', 'proc_internals', 'coloured_edges',
               'graph_dir', 'gitter_sidecar', 'mathjax_config', 'parallel',
               'revision', 'fixed_length_limit']
    defaults = {'src_dir': ['./src'],
                'extensions': ['f90', 'f95', 'f03', 'f08', 'f15'],
                'fpp_extensions': ['F90', 'F95', 'F03', 'F08', 'F15', 'F',
                                   'FOR'],
                'fixed_extensions': ['f', 'for', 'F', 'FOR'],
                'output_dir': './doc',
                'project': 'Fortran Program',
                'project_url': '',
                'display': ['public', 'protected'],
                'hide_undoc': 'false',
                'year': date.today().year,
                'exclude': [],
                'exclude_dir': [],
                'docmark': '!',
                'docmark_alt': '*',
                'predocmark': '>',
                'predocmark_alt': '|',
                'favicon': 'default-icon',
                'extra_vartypes': [],
                'incl_src': 'true',
                'source': 'false',
                'macro': [],
                'include': [],
                'preprocess': 'true',
                'preprocessor': '',
                'proc_internals': 'false',
                'warn': 'false',
                'quiet': 'false',
                'search': 'true',
                'lower': 'false',
                'sort': 'src',
                'extra_mods': [],
                'dbg': True,
                'graph': 'false',
                'graph_maxdepth': '10000',
                'graph_maxnodes': '1000000000',
                'license': '',
                'extra_filetypes': [],
                'creation_date': '%Y-%m-%dT%H:%M:%S.%f%z',
                'print_creation_date': False,
                'coloured_edges': 'false',
                'parallel': ncpus,
                'fixed_length_limit': 'true',
                }
    listopts = ['extensions', 'fpp_extensions', 'fixed_extensions',
                'display', 'extra_vartypes', 'src_dir', 'exclude',
                'exclude_dir', 'macro', 'include', 'extra_mods',
                'extra_filetypes']

    if args.warn:
        args.warn = 'true'
    else:
        del args.warn
    if args.quiet:
        args.quiet = 'true'
    else:
        del args.quiet
    if not args.search:
        args.search = 'false'
    else:
        del args.search

    for option in options:
        if hasattr(args, option) and getattr(args, option):
            proj_data[option] = getattr(args, option)
        elif option in proj_data:
            # Think if there is a safe way to evaluate any expressions found
            # in this list
            if option not in listopts:
                proj_data[option] = '\n'.join(proj_data[option])
        elif option in defaults:
            proj_data[option] = defaults[option]

    # Evaluate paths relative to project file location
    base_dir = os.path.abspath(os.path.dirname(args.project_file.name))
    proj_data['base_dir'] = base_dir
    for var in ['src_dir', 'exclude_dir', 'include']:
        if var in proj_data:
            proj_data[var] = [
                os.path.normpath(os.path.join(
                    base_dir,
                    os.path.expanduser(os.path.expandvars(p))))
                for p in proj_data[var]]
    for var in ['page_dir', 'output_dir', 'graph_dir', 'media_dir', 'css',
                'mathjax_config']:
        if var in proj_data:
            proj_data[var] = os.path.normpath(os.path.join(
                base_dir,
                os.path.expanduser(os.path.expandvars(proj_data[var]))))
    if proj_data['favicon'].strip() != defaults['favicon']:
        proj_data['favicon'] = os.path.normpath(os.path.join(
            base_dir,
            os.path.expanduser(os.path.expandvars(proj_data['favicon']))))

    proj_data['display'] = [item.lower() for item in proj_data['display']]
    proj_data['incl_src'] = proj_data['incl_src'].lower()
    proj_data['creation_date'] = datetime.now().strftime(
        proj_data['creation_date'])
    relative = (proj_data['project_url'] == '')
    proj_data['relative'] = relative
    proj_data['extensions'] += [ext for ext in proj_data['fpp_extensions']
                                if ext not in proj_data['extensions']]

    # Parse file extensions and comment characters for extra filetypes
    extdict = {}
    for ext in proj_data['extra_filetypes']:
        sp = ext.split()
        if len(sp) < 2:
            continue
        if len(sp) == 2:
            extdict[sp[0]] = (sp[1])  # comment_char only
        else:
            extdict[sp[0]] = (sp[1], sp[2])  # comment_char and lexer_str
    proj_data['extra_filetypes'] = extdict

    # Make sure no src_dir is contained within output_dir
    for projdir in proj_data['src_dir']:
        proj_path = ford.utils.split_path(projdir)
        out_path = ford.utils.split_path(proj_data['output_dir'])
        for directory in out_path:
            if len(proj_path) == 0:
                break
            if directory == proj_path[0]:
                proj_path.remove(directory)
            else:
                break
        else:
            print('Error: directory containing source-code {} is a '
                  'subdirectory of output directory {}.'.format(
                      projdir, proj_data['output_dir']))
            sys.exit(1)

    # Check that none of the docmarks are the same
    if proj_data['docmark'] == proj_data['predocmark'] != '':
        print('Error: docmark and predocmark are the same.')
        sys.exit(1)
    if proj_data['docmark'] == proj_data['docmark_alt'] != '':
        print('Error: docmark and docmark_alt are the same.')
        sys.exit(1)
    if proj_data['docmark'] == proj_data['predocmark_alt'] != '':
        print('Error: docmark and predocmark_alt are the same.')
        sys.exit(1)
    if proj_data['docmark_alt'] == proj_data['predocmark'] != '':
        print('Error: docmark_alt and predocmark are the same.')
        sys.exit(1)
    if proj_data['docmark_alt'] == proj_data['predocmark_alt'] != '':
        print('Error: docmark_alt and predocmark_alt are the same.')
        sys.exit(1)
    if proj_data['predocmark'] == proj_data['predocmark_alt'] != '':
        print('Error: predocmark and predocmark_alt are the same.')
        sys.exit(1)

    # Add gitter sidecar if specified in metadata
    if 'gitter_sidecar' in proj_data:
        proj_docs += '''
        <script>
            ((window.gitter = {{}}).chat = {{}}).options = {{
            room: '{}'
            }};
        </script>
        <script src="https://sidecar.gitter.im/dist/sidecar.v1.js" async defer></script>
        '''.format(proj_data['gitter_sidecar'].strip())

    # Handle preprocessor:
    if proj_data['preprocess'].lower() == 'true':
        if proj_data['preprocessor']:
            preprocessor = proj_data['preprocessor'].split()
        else:
            preprocessor = ['cpp', '-traditional-cpp', '-E',
                            '-D__GFORTRAN__']
        # Check whether the preprocessor works (reading nothing from stdin)
        try:
            devnull = open(os.devnull)
            subprocess.Popen(preprocessor, stdin=devnull, stdout=devnull,
                             stderr=devnull).communicate()
        except OSError as ex:
            print('Warning: Testing preprocessor failed')
            print('  Preprocessor command: {}'.format(preprocessor))
            print('  Exception: {}'.format(ex))
            print('  -> Preprocessing turned off')
            proj_data['preprocess'] = 'false'
        else:
            proj_data['preprocess'] = 'true'
            proj_data['preprocessor'] = preprocessor

    # Get correct license
    try:
        proj_data['license'] = LICENSES[proj_data['license'].lower()]
    except KeyError:
        print('Warning: license "{}" not recognized.'.format(
            proj_data['license']))
        proj_data['license'] = ''

    # Return project data, docs, and the Markdown reader
    md.reset()
    md.Meta = {}
    return (proj_data, proj_docs, md)
def main(proj_data, proj_docs, md):
    """Main driver of FORD."""
    if proj_data['relative']:
        proj_data['project_url'] = '.'
    # Parse the files in your project
    project = ford.fortran_project.Project(proj_data)
    if len(project.files) < 1:
        print("Error: No source files with appropriate extension found in "
              "specified directory.")
        sys.exit(1)
    # Convert the documentation from Markdown to HTML. Make sure to properly
    # handle LaTeX and metadata.
    if proj_data['relative']:
        project.markdown(md, '..')
    else:
        project.markdown(md, proj_data['project_url'])
    project.correlate()
    if proj_data['relative']:
        project.make_links('..')
    else:
        project.make_links(proj_data['project_url'])
    # Convert summaries and descriptions to HTML
    if proj_data['relative']:
        ford.sourceform.set_base_url('.')
    if 'summary' in proj_data:
        proj_data['summary'] = md.convert(proj_data['summary'])
        proj_data['summary'] = ford.utils.sub_links(
            ford.utils.sub_macros(
                ford.utils.sub_notes(proj_data['summary']),
                proj_data['project_url']), project)
    if 'author_description' in proj_data:
        proj_data['author_description'] = md.convert(
            proj_data['author_description'])
        proj_data['author_description'] = ford.utils.sub_links(
            ford.utils.sub_macros(
                ford.utils.sub_notes(proj_data['author_description']),
                proj_data['project_url']), project)
    proj_docs_ = ford.utils.sub_links(
        ford.utils.sub_macros(ford.utils.sub_notes(proj_docs),
                              proj_data['project_url']), project)
    # Process any pages
    if 'page_dir' in proj_data:
        page_tree = ford.pagetree.get_page_tree(
            os.path.normpath(proj_data['page_dir']), md)
        print()
    else:
        page_tree = None
    proj_data['pages'] = page_tree
    # Produce the documentation using Jinja2. Output it to the desired
    # location and copy any files that are needed (CSS, JS, images, fonts,
    # source files, etc.)
    docs = ford.output.Documentation(proj_data, proj_docs_, project,
                                     page_tree)
    docs.writeout()
    print('')
    return 0
def convertToFree(stream, length_limit=True):
    """Convert stream from fixed source form to free source form."""
    linestack = []
    for line in stream:
        convline = FortranLine(line, length_limit)
        if convline.is_regular:
            if convline.isContinuation and linestack:
                linestack[0].continueLine()
            for l in linestack:
                yield str(l)
            linestack = []
        linestack.append(convline)
    for l in linestack:
        yield str(l)
def continueLine(self):
    """Insert line continuation symbol at end of line."""
    if not (self.isLong and self.is_regular):
        self.line_conv = self.line_conv.rstrip() + " &\n"
    else:
        temp = self.line_conv[:72].rstrip() + " &"
        self.line_conv = temp.ljust(72) + self.excess_line
def id_mods(obj, modlist, intrinsic_mods={}, submodlist=[]):
    """Match USE statements up with the right modules."""
    for i in range(len(obj.uses)):
        for candidate in modlist:
            if obj.uses[i][0].lower() == candidate.name.lower():
                obj.uses[i] = [candidate, obj.uses[i][1]]
                break
        else:
            if obj.uses[i][0].lower() in intrinsic_mods:
                obj.uses[i] = [intrinsic_mods[obj.uses[i][0].lower()],
                               obj.uses[i][1]]
                continue
    if getattr(obj, 'ancestor', None):
        for submod in submodlist:
            if obj.ancestor.lower() == submod.name.lower():
                obj.ancestor = submod
                break
    if hasattr(obj, 'ancestor_mod'):
        for mod in modlist:
            if obj.ancestor_mod.lower() == mod.name.lower():
                obj.ancestor_mod = mod
                break
    for modproc in getattr(obj, 'modprocedures', []):
        id_mods(modproc, modlist, intrinsic_mods)
    for func in getattr(obj, 'functions', []):
        id_mods(func, modlist, intrinsic_mods)
    for subroutine in getattr(obj, 'subroutines', []):
        id_mods(subroutine, modlist, intrinsic_mods)
def allfiles(self):
    """Instead of duplicating files, it is much more efficient to create the
    iterator on the fly.
    """
    for f in self.files:
        yield f
    for f in self.extra_files:
        yield f
def correlate(self):
    """Associates various constructs with each other."""
    print("Correlating information from different parts of your project...")

    non_local_mods = INTRINSIC_MODS
    for item in self.settings['extra_mods']:
        i = item.find(':')
        if i < 0:
            print('Warning: could not parse extra modules "{}"'.format(item))
            continue
        name = item[:i].strip()
        url = item[i + 1:].strip()
        non_local_mods[name.lower()] = '<a href="{}">{}</a>'.format(url,
                                                                    name)

    # Match USE statements up with the right modules
    for s in self.modules:
        id_mods(s, self.modules, non_local_mods, self.submodules)
    for s in self.procedures:
        id_mods(s, self.modules, non_local_mods, self.submodules)
    for s in self.programs:
        id_mods(s, self.modules, non_local_mods, self.submodules)
    for s in self.submodules:
        id_mods(s, self.modules, non_local_mods, self.submodules)
    for s in self.blockdata:
        id_mods(s, self.modules, non_local_mods, self.submodules)

    # Get the order to process other correlations with
    deplist = {}

    def get_deps(item):
        uselist = [m[0] for m in item.uses]
        for proc in getattr(item, 'subroutines', []):
            uselist.extend(get_deps(proc))
        for proc in getattr(item, 'functions', []):
            uselist.extend(get_deps(proc))
        for proc in getattr(item, 'modprocedures', []):
            uselist.extend(get_deps(proc))
        return uselist

    for mod in self.modules:
        uselist = get_deps(mod)
        uselist = [m for m in uselist
                   if type(m) == ford.sourceform.FortranModule]
        deplist[mod] = set(uselist)
        mod.deplist = uselist
    for mod in self.submodules:
        if type(mod.ancestor_mod) is ford.sourceform.FortranModule:
            uselist = get_deps(mod)
            uselist = [m for m in uselist
                       if type(m) == ford.sourceform.FortranModule]
            if mod.ancestor:
                if type(mod.ancestor) is ford.sourceform.FortranSubmodule:
                    uselist.insert(0, mod.ancestor)
                elif self.settings['warn'].lower() == 'true':
                    print('Warning: could not identify parent SUBMODULE of '
                          'SUBMODULE ' + mod.name)
            else:
                uselist.insert(0, mod.ancestor_mod)
            mod.deplist = uselist
            deplist[mod] = set(uselist)
        elif self.settings['warn'].lower() == 'true':
            print('Warning: could not identify parent MODULE of SUBMODULE '
                  + mod.name)

    # Get dependencies for programs and top-level procedures as well,
    # if dependency graphs are to be produced
    if self.settings['graph'].lower() == 'true':
        for proc in self.procedures:
            proc.deplist = set(
                m for m in get_deps(proc)
                if type(m) == ford.sourceform.FortranModule)
        for prog in self.programs:
            prog.deplist = set(
                m for m in get_deps(prog)
                if type(m) == ford.sourceform.FortranModule)
        for block in self.blockdata:
            block.deplist = set(
                m for m in get_deps(block)
                if type(m) == ford.sourceform.FortranModule)

    ranklist = toposort.toposort_flatten(deplist)
    for proc in self.procedures:
        if proc.parobj == 'sourcefile':
            ranklist.append(proc)
    ranklist.extend(self.programs)
    ranklist.extend(self.blockdata)

    # Perform remaining correlations for the project
    for container in ranklist:
        if type(container) != str:
            container.correlate(self)
    for container in ranklist:
        if type(container) != str:
            container.prune()

    if self.settings['project_url'] == '.':
        url = '..'
    else:
        url = self.settings['project_url']

    for sfile in self.files:
        for module in sfile.modules:
            for function in module.functions:
                self.procedures.append(function)
            for subroutine in module.subroutines:
                self.procedures.append(subroutine)
            for interface in module.interfaces:
                self.procedures.append(interface)
            for absint in module.absinterfaces:
                self.absinterfaces.append(absint)
            for dtype in module.types:
                self.types.append(dtype)
        for module in sfile.submodules:
            for function in module.functions:
                self.procedures.append(function)
            for subroutine in module.subroutines:
                self.procedures.append(subroutine)
            for function in module.modfunctions:
                self.submodprocedures.append(function)
            for subroutine in module.modsubroutines:
                self.submodprocedures.append(subroutine)
            for modproc in module.modprocedures:
                self.submodprocedures.append(modproc)
            for interface in module.interfaces:
                self.procedures.append(interface)
            for absint in module.absinterfaces:
                self.absinterfaces.append(absint)
            for dtype in module.types:
                self.types.append(dtype)
        for program in sfile.programs:
            for function in program.functions:
                self.procedures.append(function)
            for subroutine in program.subroutines:
                self.procedures.append(subroutine)
            for interface in program.interfaces:
                self.procedures.append(interface)
            for absint in program.absinterfaces:
                self.absinterfaces.append(absint)
            for dtype in program.types:
                self.types.append(dtype)
        for block in sfile.blockdata:
            for dtype in block.types:
                self.types.append(dtype)

    def sum_lines(*argv, **kwargs):
        """Wrapper for minimizing memory consumption."""
        routine = kwargs.get('func', 'num_lines')
        n = 0
        for arg in argv:
            for item in arg:
                n += getattr(item, routine)
        return n

    self.mod_lines = sum_lines(self.modules, self.submodules)
    self.proc_lines = sum_lines(self.procedures)
    self.file_lines = sum_lines(self.files)
    self.type_lines = sum_lines(self.types)
    self.type_lines_all = sum_lines(self.types, func='num_lines_all')
    self.absint_lines = sum_lines(self.absinterfaces)
    self.prog_lines = sum_lines(self.programs)
    self.block_lines = sum_lines(self.blockdata)
    print()
def markdown(self,md,base_url='..'): """ Process the documentation with Markdown to produce HTML. """ print("\nProcessing documentation comments...") ford.sourceform.set_base_url(base_url) if self.settings['warn'].lower() == 'true': print() for src in self.allfiles: src.markdown(md, self)
def make_links(self, base_url='..'):
    """
    Substitute intra-site links to the documentation of other parts of
    the program.
    """
    ford.sourceform.set_base_url(base_url)
    for src in self.allfiles:
        src.make_links(self)
def make_srcdir_list(self, exclude_dirs):
    """
    Returns a flat list of source directories. Like os.walk, except that:
    a) directories listed in exclude_dirs are excluded, along with all of
       their subdirectories;
    b) absolute paths are returned.
    """
    srcdir_list = []
    for topdir in self.topdirs:
        srcdir_list.append(topdir)
        srcdir_list += self.recursive_dir_list(topdir, exclude_dirs)
    return srcdir_list
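# recursive_dir_list is defined elsewhere; this hypothetical sketch (not
# part of FORD) shows the intended behaviour: walk the tree, prune
# excluded directories in place so os.walk never descends into them, and
# collect absolute paths.
def _recursive_dir_list_sketch(self, topdir, exclude_dirs):
    import os
    result = []
    for root, dirs, files in os.walk(topdir):
        dirs[:] = [d for d in dirs
                   if os.path.abspath(os.path.join(root, d)) not in exclude_dirs]
        result.extend(os.path.abspath(os.path.join(root, d)) for d in dirs)
    return result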
def sub_notes(docs): """ Substitutes the special controls for notes, warnings, todos, and bugs with the corresponding div. """ def substitute(match): ret = "</p><div class=\"alert alert-{}\" role=\"alert\"><h4>{}</h4>" \ "<p>{}</p></div>".format(NOTE_TYPE[match.group(1).lower()], match.group(1).capitalize(), match.group(2)) if len(match.groups()) >= 4 and not match.group(4): ret += '\n<p>' return ret for regex in NOTE_RE: docs = regex.sub(substitute,docs) return docs
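# NOTE_RE and NOTE_TYPE live at module level and are not reproduced
# here; the simplified, hypothetical pattern and mapping below (not
# FORD's real ones) just show the shape of the transformation performed
# by sub_notes.
def _demo_sub_notes():
    import re
    note_re_demo = re.compile(r'@(note|warning)\s+(.+)', re.IGNORECASE)
    note_type_demo = {'note': 'info', 'warning': 'warning'}
    docs = note_re_demo.sub(
        lambda m: '</p><div class="alert alert-{}" role="alert"><h4>{}</h4>'
                  '<p>{}</p></div>'.format(note_type_demo[m.group(1).lower()],
                                           m.group(1).capitalize(),
                                           m.group(2)),
        '@note Remember to deallocate the array.')
    assert docs.startswith('</p><div class="alert alert-info"')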
def get_parens(line, retlevel=0, retblevel=0):
    """
    By default, takes a string starting with an open parenthesis and
    returns the portion of the string up to the corresponding close
    parenthesis. If retlevel != 0, returns once that nesting level (for
    parentheses) is reached. Likewise for retblevel (square brackets).
    """
    if len(line) == 0:
        return line
    parenstr = ''
    level = 0
    blevel = 0
    for char in line:
        if char == '(':
            level += 1
        elif char == ')':
            level -= 1
        elif char == '[':
            blevel += 1
        elif char == ']':
            blevel -= 1
        elif (char.isalpha() or char == '_' or char == ':' or char == ','
              or char == ' ') and level == retlevel and blevel == retblevel:
            return parenstr
        parenstr = parenstr + char
        if level == retlevel and blevel == retblevel:
            return parenstr
    raise Exception("Couldn't parse parentheses: {}".format(line))
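# A quick illustration (hypothetical helper, not part of FORD) of the
# default behaviour of get_parens on typical Fortran fragments.
def _demo_get_parens():
    # Returns the substring up to the parenthesis balancing the first one
    assert get_parens('(real(kind=8)), intent(in) :: x') == '(real(kind=8))'
    assert get_parens('(len=*), parameter :: s') == '(len=*)'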
def paren_split(sep,string): """ Splits the string into pieces divided by sep, when sep is outside of parentheses. """ if len(sep) != 1: raise Exception("Separation string must be one character long") retlist = [] level = 0 blevel = 0 left = 0 for i in range(len(string)): if string[i] == "(": level += 1 elif string[i] == ")": level -= 1 elif string[i] == "[": blevel += 1 elif string[i] == "]": blevel -= 1 elif string[i] == sep and level == 0 and blevel == 0: retlist.append(string[left:i]) left = i+1 retlist.append(string[left:]) return retlist
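# A quick illustration (hypothetical helper, not part of FORD) of
# paren_split's behaviour.
def _demo_paren_split():
    # Separators inside parentheses or brackets do not split; note that
    # surrounding whitespace is preserved in the pieces.
    assert paren_split(',', 'a, b(c,d), e[f,g]') == ['a', ' b(c,d)', ' e[f,g]']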
def quote_split(sep, string):
    """
    Splits the string into pieces divided by sep, when sep is not inside
    quotes. A doubled quote character within a quoted string ("" or '')
    is treated as an escaped quote.
    """
    if len(sep) != 1:
        raise Exception("Separation string must be one character long")
    retlist = []
    dquote = False   # inside a double-quoted string
    squote = False   # inside a single-quoted string
    left = 0
    i = 0
    while i < len(string):
        if string[i] == '"' and not squote:
            if not dquote:
                dquote = True
            elif (i+1) < len(string) and string[i+1] == '"':
                i += 1
            else:
                dquote = False
        elif string[i] == "'" and not dquote:
            if not squote:
                squote = True
            elif (i+1) < len(string) and string[i+1] == "'":
                i += 1
            else:
                squote = False
        elif string[i] == sep and not dquote and not squote:
            retlist.append(string[left:i])
            left = i + 1
        i += 1
    retlist.append(string[left:])
    return retlist
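# A quick illustration (hypothetical helper, not part of FORD) of
# quote_split's behaviour.
def _demo_quote_split():
    # The comma inside the quoted string does not split
    assert quote_split(',', 'a,"b,c",d') == ['a', '"b,c"', 'd']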
def split_path(path): ''' Splits the argument into its constituent directories and returns them as a list. ''' def recurse_path(path,retlist): if len(retlist) > 100: fullpath = os.path.join(*([ path, ] + retlist)) print("Directory '{}' contains too many levels".format(fullpath)) exit(1) head, tail = os.path.split(path) if len(tail) > 0: retlist.insert(0,tail) recurse_path(head,retlist) elif len(head) > 1: recurse_path(head,retlist) else: return retlist = [] path = os.path.realpath(os.path.normpath(path)) drive, path = os.path.splitdrive(path) if len(drive) > 0: retlist.append(drive) recurse_path(path,retlist) return retlist
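# A quick illustration (hypothetical helper, not part of FORD) of
# split_path, assuming a POSIX system and a path containing no symlinks
# (os.path.realpath would resolve them first).
def _demo_split_path():
    assert split_path('/usr/local/lib') == ['usr', 'local', 'lib']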
def sub_links(string, project):
    '''
    Replace links to different parts of the program, formatted as
    [[name]] or [[name(object-type)]], with the appropriate URL. Can also
    link to an item's entry in another's page with the syntax
    [[parent-name:name]]. The object type can be placed in parentheses
    for either or both of these parts.
    '''
    LINK_TYPES = {'module': 'modules',
                  'type': 'types',
                  'procedure': 'procedures',
                  'subroutine': 'procedures',
                  'function': 'procedures',
                  'proc': 'procedures',
                  'file': 'allfiles',
                  'interface': 'absinterfaces',
                  'absinterface': 'absinterfaces',
                  'program': 'programs',
                  'block': 'blockdata'}
    SUBLINK_TYPES = {'variable': 'variables',
                     'type': 'types',
                     'constructor': 'constructor',
                     'interface': 'interfaces',
                     'absinterface': 'absinterfaces',
                     'subroutine': 'subroutines',
                     'function': 'functions',
                     'final': 'finalprocs',
                     'bound': 'boundprocs',
                     'modproc': 'modprocs',
                     'common': 'common'}

    def convert_link(match):
        ERR = 'Warning: Could not substitute link {}. {}'
        url = ''
        name = ''
        found = False
        searchlist = []
        item = None
        # Groups are: name, object-type, sub-name, sub-object-type
        if not match.group(2):
            for key, val in LINK_TYPES.items():
                searchlist.extend(getattr(project, val))
        else:
            if match.group(2).lower() in LINK_TYPES:
                searchlist.extend(getattr(project, LINK_TYPES[match.group(2).lower()]))
            else:
                print(ERR.format(match.group(), 'Unrecognized classification "{}".'.format(match.group(2))))
                return match.group()
        for obj in searchlist:
            if match.group(1).lower() == obj.name.lower():
                url = obj.get_url()
                name = obj.name
                found = True
                item = obj
                break
        else:
            print(ERR.format(match.group(), '"{}" not found.'.format(match.group(1))))
            url = ''
            name = match.group(1)
        if found and match.group(3):
            searchlist = []
            if not match.group(4):
                for key, val in SUBLINK_TYPES.items():
                    if val == 'constructor':
                        if getattr(item, 'constructor', False):
                            searchlist.append(item.constructor)
                        else:
                            continue
                    else:
                        searchlist.extend(getattr(item, val, []))
            else:
                if match.group(4).lower() in SUBLINK_TYPES:
                    if hasattr(item, SUBLINK_TYPES[match.group(4).lower()]):
                        if match.group(4).lower() == 'constructor':
                            if item.constructor:
                                searchlist.append(item.constructor)
                        else:
                            searchlist.extend(getattr(item, SUBLINK_TYPES[match.group(4).lower()]))
                    else:
                        print(ERR.format(match.group(), '"{}" cannot be contained in "{}"'.format(match.group(4), item.obj)))
                        return match.group()
                else:
                    print(ERR.format(match.group(), 'Unrecognized classification "{}".'.format(match.group(4))))
                    return match.group()
            for obj in searchlist:
                if match.group(3).lower() == obj.name.lower():
                    url = url + '#' + obj.anchor
                    name = obj.name
                    item = obj
                    break
            else:
                print(ERR.format(match.group(), '"{0}" not found in "{1}", linking to page for "{1}" instead.'.format(match.group(3), name)))
        if found:
            return '<a href="{}">{}</a>'.format(url, name)
        else:
            return '<a>{}</a>'.format(name)

    # LINK_RE is built at module level; substitute every link it finds
    return LINK_RE.sub(convert_link, string)
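# LINK_RE itself is built elsewhere; this hypothetical stand-in (not
# FORD's real pattern) captures the same four groups that convert_link
# above consumes: name, object-type, sub-name, sub-object-type.
def _demo_link_syntax():
    import re
    link_re_demo = re.compile(
        r'\[\[(\w+)(?:\((\w+)\))?(?::(\w+)(?:\((\w+)\))?)?\]\]')
    m = link_re_demo.match('[[mymod(module):myvar(variable)]]')
    assert m.groups() == ('mymod', 'module', 'myvar', 'variable')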
def sub_macros(string,base_url): ''' Replaces macros in documentation with their appropriate values. These macros are used for things like providing URLs. ''' macros = { '|url|': base_url, '|media|': os.path.join(base_url,'media'), '|page|': os.path.join(base_url,'page') } for key, val in macros.items(): string = string.replace(key,val) return string
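# A quick illustration (hypothetical helper, not part of FORD) of
# sub_macros with a relative base URL, assuming a POSIX system (where
# os.path.join uses '/').
def _demo_sub_macros():
    out = sub_macros('See |media|/logo.png and |url|/index.html', '..')
    assert out == 'See ../media/logo.png and ../index.html'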
def copytree(src, dst):
    """Replaces shutil.copytree to avoid problems on certain file systems.

    shutil.copytree() and shutil.copystat() invoke os.setxattr(), which
    seems to fail when called for directories on at least one NFS file
    system. The current routine is a simple replacement, which should be
    good enough for Ford.
    """
    def touch(path):
        now = time.time()
        try:
            # assume the file is already there
            os.utime(path, (now, now))
        except os.error:
            # if it isn't, create its parent directory and an empty file
            # with that name, then set the access/modification times
            os.makedirs(os.path.dirname(path))
            open(path, "w").close()
            os.utime(path, (now, now))

    for root, dirs, files in os.walk(src):
        relsrcdir = os.path.relpath(root, src)
        dstdir = os.path.join(dst, relsrcdir)
        if not os.path.exists(dstdir):
            try:
                os.makedirs(dstdir)
            except OSError as ex:
                if ex.errno != errno.EEXIST:
                    raise
        for ff in files:
            shutil.copy(os.path.join(root, ff), os.path.join(dstdir, ff))
            touch(os.path.join(dstdir, ff))
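# Usage mirrors shutil.copytree: the destination tree is created as
# needed. The paths below are illustrative only.
def _demo_copytree():
    import os
    copytree('media', os.path.join('doc', 'media'))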
def truncate(string, width):
    """
    Truncates/pads the string to the specified width, appending an
    ellipsis of three dots if truncation occurs.
    """
    if len(string) > width:
        return string[:width-3] + '...'
    else:
        return string.ljust(width)
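# A quick illustration (hypothetical helper, not part of FORD) of
# truncate: the result is always exactly `width` characters, padded with
# spaces or cut down with a trailing '...'.
def _demo_truncate():
    assert truncate('a_very_long_identifier', 10) == 'a_very_...'
    assert truncate('short', 10) == 'short     '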