filename
stringlengths
3
67
data
stringlengths
0
58.3M
license
stringlengths
0
19.5k
types.c
/* * Type table and hash consing */ #include <string.h> #include <assert.h> #include "terms/types.h" #include "utils/hash_functions.h" #include "utils/memalloc.h" #include "utils/refcount_strings.h" #include "yices_limits.h" /* * MACRO TABLE */ /* * Finalizer for names in the symbol table. This is called * whenever a record is removed from the symbol table. * All names must have a reference counter (cf. refcount_strings.h). */ static void macro_name_finalizer(stbl_rec_t *r) { string_decref(r->string); } /* * Allocate and initialize a macro descriptor: * - n = arity * - vars = array of n type variables * - body = type index */ static type_macro_t *new_descriptor(char *name, uint32_t n, const type_t *vars, type_t body) { type_macro_t *tmp; uint32_t i; assert(n <= TYPE_MACRO_MAX_ARITY); tmp = (type_macro_t *) safe_malloc(sizeof(type_macro_t) + n * sizeof(type_t)); tmp->name = name; // We don't need to increment the ref counter here. tmp->arity = n; tmp->body = body; for (i=0; i<n; i++) { tmp->vars[i] = vars[i]; } return tmp; } /* * Same thing for an uninterpreted type constructor * - n = arity */ static type_macro_t *new_constructor(char *name, uint32_t n) { type_macro_t *tmp; tmp = (type_macro_t *) safe_malloc(sizeof(type_macro_t)); tmp->name = name; // no ref count increment required tmp->arity = n; tmp->body = NULL_TYPE; return tmp; } /* * Delete a descriptor */ static inline void delete_descriptor(type_macro_t *d) { safe_free(d); } /* * Initialize the macro table * - n = initial size * - ttbl = type table * - if n is zero, nothing is allocated yet. * an array data of default size will be allocated * on the first addition. 
*/ static void init_type_mtbl(type_mtbl_t *table, uint32_t n) { void **tmp; tmp = NULL; if (n > 0) { if (n > TYPE_MACRO_MAX_SIZE) { out_of_memory(); } tmp = (void **) safe_malloc(n * sizeof(void*)); } table->data = tmp; table->size = n; table->nelems = 0; table->free_idx = -1; init_stbl(&table->stbl, 0); init_tuple_hmap(&table->cache, 0); stbl_set_finalizer(&table->stbl, macro_name_finalizer); } /* * Delete the table and its content */ static void delete_type_mtbl(type_mtbl_t *table) { void *p; uint32_t i, n; n = table->nelems; for (i=0; i<n; i++) { p = table->data[i]; if (! has_int_tag(p)) { delete_descriptor(p); } } safe_free(table->data); table->data = NULL; delete_stbl(&table->stbl); delete_tuple_hmap(&table->cache); } /* * Empty the table: delete all macros and macro instances */ static void reset_type_mtbl(type_mtbl_t *table) { void *p; uint32_t i, n; n = table->nelems; for (i=0; i<n; i++) { p = table->data[i]; if (! has_int_tag(p)) { delete_descriptor(p); } } table->nelems = 0; table->free_idx = -1; reset_stbl(&table->stbl); reset_tuple_hmap(&table->cache); } /* * Make the table larger * - if this is the first allocation: allocate a data array of default size * - otherwise, make the data array 50% larger */ static void extend_type_mtbl(type_mtbl_t *table) { void **tmp; uint32_t n; n = table->size; if (n == 0) { n = TUPLE_HMAP_DEF_SIZE; assert(n <= TYPE_MACRO_MAX_SIZE); tmp = (void **) safe_malloc(n * sizeof(void*)); } else { n ++; n += n>>1; if (n > TYPE_MACRO_MAX_SIZE) { out_of_memory(); } tmp = (void **) safe_realloc(table->data, n * sizeof(void*)); } table->data = tmp; table->size = n; } /* * Get a macro index */ static int32_t allocate_macro_id(type_mtbl_t *table) { int32_t i; i = table->free_idx; if (i >= 0) { assert(i < table->nelems); table->free_idx = untag_i32(table->data[i]); } else { i = table->nelems; table->nelems ++; if (i >= table->size) { extend_type_mtbl(table); assert(i < table->size); } } return i; } /* * Delete descriptor id and add it to 
the free list
 * - this must be the index of a live descriptor.
 * The freed slot stores the previous free-list head as a tagged
 * integer, so live descriptors (plain pointers) and free slots
 * (tagged ints) can be told apart (cf. has_int_tag).
 */
static void free_macro_id(type_mtbl_t *table, int32_t id) {
  assert(good_type_macro(table, id));
  delete_descriptor(table->data[id]);
  table->data[id] = tag_i32(table->free_idx);
  table->free_idx = id;
}


/*
 * TYPE TABLE
 */

/*
 * Finalizer for typenames in the symbol table. This function is
 * called when record r is deleted from the symbol table.
 * All symbols must be generated by the clone function, and have
 * a reference counter (cf. refcount_strings.h).
 */
static void typename_finalizer(stbl_rec_t *r) {
  string_decref(r->string);
}

/*
 * Initialize table, with initial size = n.
 * - n must be no more than YICES_MAX_TYPES (otherwise abort via out_of_memory)
 * - allocates the six parallel arrays (kind/desc/card/flags/name/depth)
 * - the auxiliary caches (sup_tbl/inf_tbl/max_tbl) and the macro table
 *   are not allocated here; they are created lazily on first use
 */
static void type_table_init(type_table_t *table, uint32_t n) {
  // abort if the size is too large
  if (n > YICES_MAX_TYPES) {
    out_of_memory();
  }

  table->kind = (uint8_t *) safe_malloc(n * sizeof(uint8_t));
  table->desc = (type_desc_t *) safe_malloc(n * sizeof(type_desc_t));
  table->card = (uint32_t *) safe_malloc(n * sizeof(uint32_t));
  table->flags = (uint8_t *) safe_malloc(n * sizeof(uint8_t));
  table->name = (char **) safe_malloc(n * sizeof(char *));
  table->depth = (uint32_t *) safe_malloc(n * sizeof(uint32_t));

  table->size = n;
  table->nelems = 0;
  table->free_idx = NULL_TYPE;
  table->live_types = 0;

  init_int_htbl(&table->htbl, 0); // use default size
  init_stbl(&table->stbl, 0); // default size too

  // install finalizer in the symbol table
  stbl_set_finalizer(&table->stbl, typename_finalizer);

  // don't allocate sup/inf/max tables
  table->sup_tbl = NULL;
  table->inf_tbl = NULL;
  table->max_tbl = NULL;

  // macro table: not allocated yet
  table->macro_tbl = NULL;
}


/*
 * Extend the table: make it 50% larger
 */
static void type_table_extend(type_table_t *table) {
  uint32_t n;

  /*
   * new size = 1.5 * (old_size + 1) approximately
   * this computation can't overflow since old_size < YICES_MAX_TYPES
   * this also ensures that new size > old size (even if old_size <= 1).
*/ n = table->size + 1; n += n >> 1; if (n > YICES_MAX_TYPES) { out_of_memory(); } table->kind = (uint8_t *) safe_realloc(table->kind, n * sizeof(uint8_t)); table->desc = (type_desc_t *) safe_realloc(table->desc, n * sizeof(type_desc_t)); table->card = (uint32_t *) safe_realloc(table->card, n * sizeof(uint32_t)); table->flags = (uint8_t *) safe_realloc(table->flags, n * sizeof(uint8_t)); table->name = (char **) safe_realloc(table->name, n * sizeof(char *)); table->depth = (uint32_t *) safe_realloc(table->depth, n * sizeof(uint32_t)); table->size = n; } /* * Get a free type id and initializes its name to NULL. * The other fields are not initialized. */ static type_t allocate_type_id(type_table_t *table) { type_t i; i = table->free_idx; if (i >= 0) { table->free_idx = table->desc[i].next; } else { i = table->nelems; table->nelems ++; if (i >= table->size) { type_table_extend(table); } } table->name[i] = NULL; table->live_types ++; return i; } /* * Erase type i: free its descriptor and add i to the free list */ static void erase_type(type_table_t *table, type_t i) { switch (table->kind[i]) { case UNUSED_TYPE: // already deleted case BOOL_TYPE: case INT_TYPE: case REAL_TYPE: return; // never delete predefined types case BITVECTOR_TYPE: case SCALAR_TYPE: case UNINTERPRETED_TYPE: break; case TUPLE_TYPE: case FUNCTION_TYPE: case INSTANCE_TYPE: safe_free(table->desc[i].ptr); break; } if (table->name[i] != NULL) { string_decref(table->name[i]); table->name[i] = NULL; } table->kind[i] = UNUSED_TYPE; table->desc[i].next = table->free_idx; table->free_idx = i; assert(table->live_types > 0); table->live_types --; } /* * INTERNAL CACHES */ /* * Get the sup_table: create and initialize it if needed */ static int_hmap2_t *get_sup_table(type_table_t *table) { int_hmap2_t *hmap; hmap = table->sup_tbl; if (hmap == NULL) { hmap = (int_hmap2_t *) safe_malloc(sizeof(int_hmap2_t)); init_int_hmap2(hmap, 0); // default size table->sup_tbl = hmap; } return hmap; } /* * Get the inf_table: 
create and initialize it if needed */ static int_hmap2_t *get_inf_table(type_table_t *table) { int_hmap2_t *hmap; hmap = table->inf_tbl; if (hmap == NULL) { hmap = (int_hmap2_t *) safe_malloc(sizeof(int_hmap2_t)); init_int_hmap2(hmap, 0); // default size table->inf_tbl = hmap; } return hmap; } /* * Get the max_table */ static int_hmap_t *get_max_table(type_table_t *table) { int_hmap_t *hmap; hmap = table->max_tbl; if (hmap == NULL) { hmap = (int_hmap_t *) safe_malloc(sizeof(int_hmap_t)); init_int_hmap(hmap, 0); table->max_tbl = hmap; } return hmap; } /* * INTERNAL MACRO TABLE */ static type_mtbl_t *get_macro_table(type_table_t *table) { type_mtbl_t *tbl; tbl = table->macro_tbl; if (tbl == NULL) { tbl = (type_mtbl_t *) safe_malloc(sizeof(type_mtbl_t)); init_type_mtbl(tbl, TYPE_MACRO_DEF_SIZE); table->macro_tbl = tbl; } return tbl; } /* * SUPPORT FOR CARD/FLAGS COMPUTATION */ /* * Build the conjunction of flags for types a[0 ... n-1] * * In the result we have * - finite flag = 1 if a[0] ... a[n-1] are all finite * - unit flag = 1 if a[0] ... a[n-1] are all unit types * - exact flag = 1 if a[0] ... a[n-1] are all small or unit types * - max flag = 1 if a[0] ... a[n-1] are all maximal types * - min flag = 1 if a[0] ... a[n-1] are all minimal types * - ground flag = 1 if a[0] ... a[n-1] are all ground types */ static uint32_t type_flags_conjunct(type_table_t *table, uint32_t n, const type_t *a) { uint32_t i, flg; flg = UNIT_TYPE_FLAGS; for (i=0; i<n; i++) { flg &= type_flags(table, a[i]); } return flg; } /* * Product of cardinalities of all types in a[0 ... n-1] * - return a value > UINT32_MAX if there's an overflow */ static uint64_t type_card_product(type_table_t *table, uint32_t n, const type_t *a) { uint64_t prod; uint32_t i; prod = 1; for (i=0; i<n; i++) { prod *= type_card(table, a[i]); if (prod > UINT32_MAX) break; } return prod; } /* * Compute the cardinality of function type e[0] ... e[n-1] --> r * - all types e[0] ... 
e[n-1] must be small or unit * - r must be small * - return a value > UINT32_MAX if there's an overflow */ static uint64_t fun_type_card(type_table_t *table, uint32_t n, const type_t *e, type_t r) { uint64_t power, dom; uint32_t range; dom = type_card_product(table, n, e); // domain size if (dom >= 32) { // since the range has size 2 or more // power = range^dom does not fit in 32bits power = UINT32_MAX; power ++; } else { // compute power = range^dom // since dom is small we do this the easy way range = type_card(table, r); assert(2 <= range && dom >= 1); power = range; while (dom > 1) { power *= range; if (power > UINT32_MAX) break; dom --; } } return power; } /* * DEPTH COMPUTATION */ // for tuple static uint32_t depth_tuple_type(type_table_t *table, uint32_t n, const type_t *e) { uint32_t i, max, d; max = 0; for (i=0; i<n; i++) { d = type_depth(table, e[i]); if (d > max) { max = d; } } return 1 + max; } // for function type static uint32_t depth_function_type(type_table_t *table, uint32_t n, const type_t *e, type_t r) { uint32_t i, max, d; max = type_depth(table, r); for (i=0; i<n; i++) { d = type_depth(table, e[i]); if (d > max) { max = d; } } return 1 + max; } // for instance type: same as tuple static inline uint32_t depth_instance_type(type_table_t *table, uint32_t n, const type_t *param) { return depth_tuple_type(table, n, param); } /* * TYPE CREATION */ /* * Add the three predefined types */ static void add_primitive_types(type_table_t *table) { type_t i; i = allocate_type_id(table); assert(i == bool_id); table->kind[i] = BOOL_TYPE; table->desc[i].ptr = NULL; table->card[i] = 2; table->flags[i] = SMALL_TYPE_FLAGS; table->depth[i] = 0; i = allocate_type_id(table); assert(i == int_id); table->kind[i] = INT_TYPE; table->desc[i].ptr = NULL; table->card[i] = UINT32_MAX; table->flags[i] = (INFINITE_TYPE_FLAGS | TYPE_IS_MINIMAL_MASK); table->depth[i] = 0; i = allocate_type_id(table); assert(i == real_id); table->kind[i] = REAL_TYPE; table->desc[i].ptr = NULL; 
table->card[i] = UINT32_MAX; table->flags[i] = (INFINITE_TYPE_FLAGS | TYPE_IS_MAXIMAL_MASK); table->depth[i] = 0; } /* * Add type (bitvector k) and return its id * - k must be positive and no more than YICES_MAX_BVSIZE */ static type_t new_bitvector_type(type_table_t *table, uint32_t k) { type_t i; assert(0 < k && k <= YICES_MAX_BVSIZE); i = allocate_type_id(table); table->kind[i] = BITVECTOR_TYPE; table->desc[i].integer = k; table->depth[i] = 0; if (k < 32) { table->card[i] = ((uint32_t) 1) << k; table->flags[i] = SMALL_TYPE_FLAGS; } else { table->card[i] = UINT32_MAX; table->flags[i] = LARGE_TYPE_FLAGS; } return i; } /* * Add a scalar type and return its id * - k = number of elements in the type * - k must be positive. */ type_t new_scalar_type(type_table_t *table, uint32_t k) { type_t i; assert(k > 0); i = allocate_type_id(table); table->kind[i] = SCALAR_TYPE; table->desc[i].integer = k; table->card[i] = k; table->depth[i] = 0; if (k == 1) { table->flags[i] = UNIT_TYPE_FLAGS; } else { table->flags[i] = SMALL_TYPE_FLAGS; } return i; } /* * Add a new uninterpreted type and return its id * - the type is infinite and both minimal and maximal */ type_t new_uninterpreted_type(type_table_t *table) { type_t i; i = allocate_type_id(table); table->kind[i] = UNINTERPRETED_TYPE; table->desc[i].ptr = NULL; table->card[i] = UINT32_MAX; table->flags[i] = (INFINITE_TYPE_FLAGS | TYPE_IS_MAXIMAL_MASK | TYPE_IS_MINIMAL_MASK); table->depth[i] = 0; return i; } /* * Add tuple type: e[0], ..., e[n-1] */ static type_t new_tuple_type(type_table_t *table, uint32_t n, const type_t *e) { tuple_type_t *d; uint64_t card; type_t i; uint32_t j, flag; assert(0 < n && n <= YICES_MAX_ARITY); d = (tuple_type_t *) safe_malloc(sizeof(tuple_type_t) + n * sizeof(type_t)); d->nelem = n; for (j=0; j<n; j++) d->elem[j] = e[j]; i = allocate_type_id(table); table->kind[i] = TUPLE_TYPE; table->desc[i].ptr = d; /* * set flags and card * - type_flags_conjunct sets all the bits correctly * except possibly the 
exact card bit */ flag = type_flags_conjunct(table, n, e); switch (flag) { case UNIT_TYPE_FLAGS: // all components are unit types card = 1; break; case SMALL_TYPE_FLAGS: // all components are unit or small types card = type_card_product(table, n, e); if (card > UINT32_MAX) { // the product does not fit in 32bits // change exact card to inexact card card = UINT32_MAX; flag = LARGE_TYPE_FLAGS; } break; default: assert(flag == FREE_TYPE_FLAGS || flag == LARGE_TYPE_FLAGS || (flag & ~MINMAX_FLAGS_MASK) == INFINITE_TYPE_FLAGS); card = UINT32_MAX; break; } assert(0 < card && card <= UINT32_MAX); table->card[i] = card; table->flags[i] = flag; table->depth[i] = depth_tuple_type(table, n, e); return i; } /* * Add function type: (e[0], ..., e[n-1] --> r) */ static type_t new_function_type(type_table_t *table, uint32_t n, const type_t *e, type_t r) { function_type_t *d; uint64_t card; type_t i; uint32_t j, flag, rflag, minmax; assert(0 < n && n <= YICES_MAX_ARITY); d = (function_type_t *) safe_malloc(sizeof(function_type_t) + n * sizeof(type_t)); d->range = r; d->ndom = n; for (j=0; j<n; j++) d->domain[j] = e[j]; i = allocate_type_id(table); table->kind[i] = FUNCTION_TYPE; table->desc[i].ptr = d; /* * Three of the function type's flags are inherited from the range: * - fun type is unit iff range is unit (and the domains are ground) * - fun type is maximal iff range is maximal * - fun type is minimal iff range is minimal */ rflag = type_flags(table, r); minmax = rflag & MINMAX_FLAGS_MASK; // save min and max bits flag = rflag & type_flags_conjunct(table, n, e); /* * The function type has the same flags as the range type if * flag != FREE_TYPE_FLAGS and the range type is unit */ if (flag != FREE_TYPE_FLAGS && rflag == UNIT_TYPE_FLAGS) { flag = rflag; } switch (flag) { case FREE_TYPE_FLAGS: // the range or at least one domain is not ground card = UINT32_MAX; break; case UNIT_TYPE_FLAGS: // singleton range so the function type is also a singleton card = 1; break; case 
SMALL_TYPE_FLAGS: // the range is small finite // all domains are small finite or unit card = fun_type_card(table, n, e, r); if (card > UINT32_MAX) { card = UINT32_MAX; flag = LARGE_TYPE_FLAGS; } // minmax bits are inherited from the range flag = minmax | (flag & ~MINMAX_FLAGS_MASK); break; default: // the range or at least one domain is infinite // or the range and all domains are finite but at least one // of them is large. assert(flag == LARGE_TYPE_FLAGS || (flag & ~MINMAX_FLAGS_MASK) == INFINITE_TYPE_FLAGS); card = UINT32_MAX; flag = minmax | (flag & ~MINMAX_FLAGS_MASK); break; } assert(0 < card && card <= UINT32_MAX); table->card[i] = card; table->flags[i] = flag; table->depth[i] = depth_function_type(table, n, e, r); return i; } /* * Add a new type variable of the given id */ static type_t new_type_variable(type_table_t *table, uint32_t id) { type_t i; i = allocate_type_id(table); table->kind[i] = VARIABLE_TYPE; table->desc[i].integer = id; table->card[i] = UINT32_MAX; // card is not defined table->flags[i] = FREE_TYPE_FLAGS; table->depth[i] = 0; return i; } /* * Add a new instance of the given constructor cid * - n = arity * - param[0 ... n-1] = parameters * * If param[0] ... param[n-1] are all ground types, then the instance * is treated like a new uninterpreted type. Otherwise, we mark it * as a type with variables (flag = FREE_TYPE_FLAGS, card = UINT32_MAX). 
*/ static type_t new_instance_type(type_table_t *table, int32_t cid, uint32_t n, const type_t *param) { instance_type_t *d; type_t i; uint32_t j, flag; assert(0 < n && n <= YICES_MAX_ARITY); assert(table->macro_tbl != NULL && type_macro_arity(table->macro_tbl, cid) == n); d = (instance_type_t *) safe_malloc(sizeof(instance_type_t) + n * sizeof(type_t)); d->cid = cid; d->arity = n; for (j=0; j<n; j++) { d->param[j] = param[j]; } i = allocate_type_id(table); table->kind[i] = INSTANCE_TYPE; table->desc[i].ptr = d; table->card[i] = UINT32_MAX; flag = type_flags_conjunct(table, n, param); assert((flag & TYPE_IS_GROUND_MASK) || flag == FREE_TYPE_FLAGS); if (flag & TYPE_IS_GROUND_MASK) { // set flags as for uninterpreted types flag = (INFINITE_TYPE_FLAGS | TYPE_IS_MAXIMAL_MASK | TYPE_IS_MINIMAL_MASK); } table->flags[i] = flag; table->depth[i] = depth_instance_type(table, n, param); return i; } /* * HASH CONSING */ /* * Objects for hash-consing */ typedef struct bv_type_hobj_s { int_hobj_t m; // methods type_table_t *tbl; uint32_t size; } bv_type_hobj_t; typedef struct tuple_type_hobj_s { int_hobj_t m; type_table_t *tbl; uint32_t n; const type_t *elem; } tuple_type_hobj_t; typedef struct function_type_hobj_s { int_hobj_t m; type_table_t *tbl; type_t range; uint32_t n; const type_t *dom; } function_type_hobj_t; typedef struct type_var_hobj_s { int_hobj_t m; type_table_t *tbl; uint32_t id; } type_var_hobj_t; typedef struct instance_type_hobj_s { int_hobj_t m; type_table_t *tbl; int32_t cid; uint32_t arity; const type_t *param; } instance_type_hobj_t; /* * Hash functions */ static uint32_t hash_bv_type(bv_type_hobj_t *p) { return jenkins_hash_pair(p->size, 0, 0x7838abe2); } static uint32_t hash_tuple_type(tuple_type_hobj_t *p) { return jenkins_hash_intarray2(p->elem, p->n, 0x8193ea92); } static uint32_t hash_function_type(function_type_hobj_t *p) { uint32_t h; h = jenkins_hash_intarray2(p->dom, p->n, 0x5ad7b72f); return jenkins_hash_pair(p->range, 0, h); } static uint32_t 
hash_type_var(type_var_hobj_t *p) { return jenkins_hash_pair(p->id, 0, 0x823a33ad); } static uint32_t hash_instance_type(instance_type_hobj_t *p) { uint32_t h; h = jenkins_hash_intarray2(p->param, p->arity, 0xabe3d76F); return jenkins_hash_pair(p->cid, 0, h); } /* * Hash functions used during garbage collection. * Make sure they are consistent with the ones above. */ static uint32_t hash_bvtype(int32_t size) { return jenkins_hash_pair(size, 0, 0x7838abe2); } static uint32_t hash_tupletype(tuple_type_t *p) { return jenkins_hash_intarray2(p->elem, p->nelem, 0x8193ea92); } static uint32_t hash_funtype(function_type_t *p) { uint32_t h; h = jenkins_hash_intarray2(p->domain, p->ndom, 0x5ad7b72f); return jenkins_hash_pair(p->range, 0, h); } static uint32_t hash_typevar(uint32_t id) { return jenkins_hash_pair(id, 0, 0x823a33ad); } static uint32_t hash_instancetype(instance_type_t *p) { uint32_t h; h = jenkins_hash_intarray2(p->param, p->arity, 0xabe3d76F); return jenkins_hash_pair(p->cid, 0, h); } /* * Comparison functions for hash consing */ static bool eq_bv_type(bv_type_hobj_t *p, type_t i) { type_table_t *table; table = p->tbl; return table->kind[i] == BITVECTOR_TYPE && table->desc[i].integer == p->size; } static bool eq_tuple_type(tuple_type_hobj_t *p, type_t i) { type_table_t *table; tuple_type_t *d; int32_t j; table = p->tbl; if (table->kind[i] != TUPLE_TYPE) return false; d = (tuple_type_t *) table->desc[i].ptr; if (d->nelem != p->n) return false; for (j=0; j<p->n; j++) { if (d->elem[j] != p->elem[j]) return false; } return true; } static bool eq_function_type(function_type_hobj_t *p, type_t i) { type_table_t *table; function_type_t *d; int32_t j; table = p->tbl; if (table->kind[i] != FUNCTION_TYPE) return false; d = (function_type_t *) table->desc[i].ptr; if (d->range != p->range || d->ndom != p->n) return false; for (j=0; j<p->n; j++) { if (d->domain[j] != p->dom[j]) return false; } return true; } static bool eq_type_var(type_var_hobj_t *p, type_t i) { 
type_table_t *table; table = p->tbl; return table->kind[i] == VARIABLE_TYPE && table->desc[i].integer == p->id; } static bool eq_instance_type(instance_type_hobj_t *p, type_t i) { type_table_t *table; instance_type_t *d; uint32_t j; table = p->tbl; if (table->kind[i] != INSTANCE_TYPE) return false; d = (instance_type_t *) table->desc[i].ptr; if (d->cid != p->cid || d->arity != p->arity) return false; for (j=0; j<p->arity; j++) { if (d->param[j] != p->param[j]) return false; } return true; } /* * Builder functions */ static type_t build_bv_type(bv_type_hobj_t *p) { return new_bitvector_type(p->tbl, p->size); } static type_t build_tuple_type(tuple_type_hobj_t *p) { return new_tuple_type(p->tbl, p->n, p->elem); } static type_t build_function_type(function_type_hobj_t *p) { return new_function_type(p->tbl, p->n, p->dom, p->range); } static type_t build_type_var(type_var_hobj_t *p) { return new_type_variable(p->tbl, p->id);; } static type_t build_instance_type(instance_type_hobj_t *p) { return new_instance_type(p->tbl, p->cid, p->arity, p->param); } /* * Global Hash Objects */ static bv_type_hobj_t bv_hobj = { { (hobj_hash_t) hash_bv_type, (hobj_eq_t) eq_bv_type, (hobj_build_t) build_bv_type }, NULL, 0, }; static tuple_type_hobj_t tuple_hobj = { { (hobj_hash_t) hash_tuple_type, (hobj_eq_t) eq_tuple_type, (hobj_build_t) build_tuple_type }, NULL, 0, NULL, }; static function_type_hobj_t function_hobj = { { (hobj_hash_t) hash_function_type, (hobj_eq_t) eq_function_type, (hobj_build_t) build_function_type }, NULL, 0, 0, NULL, }; static type_var_hobj_t var_hobj = { { (hobj_hash_t) hash_type_var, (hobj_eq_t) eq_type_var, (hobj_build_t) build_type_var }, NULL, 0, }; static instance_type_hobj_t instance_hobj = { { (hobj_hash_t) hash_instance_type, (hobj_eq_t) eq_instance_type, (hobj_build_t) build_instance_type }, NULL, 0, 0, NULL, }; /* * TABLE MANAGEMENT + EXPORTED TYPE CONSTRUCTORS * * NOTE: The constructors for uninterpreted and scalar types * are defined above. 
They don't use hash consing. */ /* * Initialize table: add the predefined types */ void init_type_table(type_table_t *table, uint32_t n) { type_table_init(table, n); add_primitive_types(table); } /* * Delete table: free all allocated memory */ void delete_type_table(type_table_t *table) { uint32_t i; // decrement refcount for all names for (i=0; i<table->nelems; i++) { if (table->name[i] != NULL) { string_decref(table->name[i]); } } // delete all allocated descriptors for (i=0; i<table->nelems; i++) { switch (table->kind[i]) { case TUPLE_TYPE: case FUNCTION_TYPE: case INSTANCE_TYPE: safe_free(table->desc[i].ptr); break; default: break; } } safe_free(table->kind); safe_free(table->desc); safe_free(table->card); safe_free(table->flags); safe_free(table->name); safe_free(table->depth); table->kind = NULL; table->desc = NULL; table->card = NULL; table->flags = NULL; table->name = NULL; table->depth = NULL; delete_int_htbl(&table->htbl); delete_stbl(&table->stbl); if (table->sup_tbl != NULL) { delete_int_hmap2(table->sup_tbl); safe_free(table->sup_tbl); table->sup_tbl = NULL; } if (table->inf_tbl != NULL) { delete_int_hmap2(table->inf_tbl); safe_free(table->inf_tbl); table->inf_tbl = NULL; } if (table->max_tbl != NULL) { delete_int_hmap(table->max_tbl); safe_free(table->max_tbl); table->max_tbl = NULL; } if (table->macro_tbl != NULL) { delete_type_mtbl(table->macro_tbl); safe_free(table->macro_tbl); table->macro_tbl = NULL; } } /* * Full reset: delete everything except the primitive types */ void reset_type_table(type_table_t *table) { uint32_t i; // decrement ref counts for (i=0; i<table->nelems; i++) { if (table->name[i] != NULL) { string_decref(table->name[i]); } } // delete descriptors for (i=0; i<table->nelems; i++) { switch (table->kind[i]) { case TUPLE_TYPE: case FUNCTION_TYPE: case INSTANCE_TYPE: safe_free(table->desc[i].ptr); break; default: break; } } reset_int_htbl(&table->htbl); reset_stbl(&table->stbl); if (table->sup_tbl != NULL) { 
reset_int_hmap2(table->sup_tbl); } if (table->inf_tbl != NULL) { reset_int_hmap2(table->inf_tbl); } if (table->max_tbl != NULL) { int_hmap_reset(table->max_tbl); } if (table->macro_tbl != NULL) { reset_type_mtbl(table->macro_tbl); } table->nelems = 0; table->free_idx = NULL_TYPE; table->live_types = 0; add_primitive_types(table); } /* * Bitvector type */ type_t bv_type(type_table_t *table, uint32_t size) { assert(size > 0); bv_hobj.tbl = table; bv_hobj.size = size; return int_htbl_get_obj(&table->htbl, &bv_hobj.m); } /* * Tuple type */ type_t tuple_type(type_table_t *table, uint32_t n, const type_t elem[]) { assert(0 < n && n <= YICES_MAX_ARITY); tuple_hobj.tbl = table; tuple_hobj.n = n; tuple_hobj.elem = elem; return int_htbl_get_obj(&table->htbl, &tuple_hobj.m); } /* * Function type */ type_t function_type(type_table_t *table, type_t range, uint32_t n, const type_t dom[]) { assert(0 < n && n <= YICES_MAX_ARITY); function_hobj.tbl = table; function_hobj.range = range; function_hobj.n = n; function_hobj.dom = dom; return int_htbl_get_obj(&table->htbl, &function_hobj.m); } /* * Type variable */ type_t type_variable(type_table_t *table, uint32_t id) { var_hobj.tbl = table; var_hobj.id = id; return int_htbl_get_obj(&table->htbl, &var_hobj.m); } /* * Type instance */ type_t instance_type(type_table_t *table, int32_t cid, uint32_t n, const type_t tau[]) { assert(0 < n && n <= YICES_MAX_ARITY); instance_hobj.tbl = table; instance_hobj.cid = cid; instance_hobj.arity = n; instance_hobj.param = tau; return int_htbl_get_obj(&table->htbl, &instance_hobj.m); } /* * SUBSTITUTION */ #ifndef NDEBUG /* * Check that the elements of v are distinct variables */ static bool all_distinct_vars(type_table_t *table, uint32_t n, const type_t v[]) { uint32_t i, j; for (i=0; i<n; i++) { if (! 
is_type_variable(table, v[i])) { return false; } } for (i=0; i<n; i++) { for (j=i+1; j<n; j++) { if (v[i] == v[j]) { return false; } } } return true; } #endif /* * Apply substitution to tau: * - hmap defines the substitution and stores substitution of already visited types */ static type_t type_subst_recur(type_table_t *table, int_hmap_t *hmap, type_t tau); /* * Build the tuple type (tuple (subst tau[0]) ... (subst tau[n-1])) */ static type_t tuple_type_subst(type_table_t *table, int_hmap_t *hmap, const type_t *tau, uint32_t n) { type_t buffer[8]; type_t *s; type_t result; uint32_t i; s = buffer; if (n > 8) { s = (type_t *) safe_malloc(n * sizeof(type_t)); } for (i=0; i<n; i++) { s[i] = type_subst_recur(table, hmap, tau[i]); } result = tuple_type(table, n, s); if (n > 8) { safe_free(s); } return result; } /* * Build the function type (-> (subst tau[0]) ... (subst tau[n-1]) (subst sigma)) */ static type_t function_type_subst(type_table_t *table, int_hmap_t *hmap, type_t sigma, const type_t *tau, uint32_t n) { type_t buffer[8]; type_t *s; type_t result; uint32_t i; s = buffer; if (n > 8) { s = (type_t *) safe_malloc(n * sizeof(type_t)); } for (i=0; i<n; i++) { s[i] = type_subst_recur(table, hmap, tau[i]); } sigma = type_subst_recur(table, hmap, sigma); result = function_type(table, sigma, n, s); if (n > 8) { safe_free(s); } return result; } /* * Build the instance (cid (subst tau[0]) ... 
(subst tau[n-1]))
 * - tau = array of n parameter types
 */
static type_t instance_type_subst(type_table_t *table, int_hmap_t *hmap, int32_t cid, type_t *tau, uint32_t n) {
  type_t buffer[8];
  type_t *s;
  type_t result;
  uint32_t i;

  // use the stack buffer when n is small, heap-allocate otherwise
  s = buffer;
  if (n > 8) {
    s = (type_t *) safe_malloc(n * sizeof(type_t));
  }
  for (i=0; i<n; i++) {
    s[i] = type_subst_recur(table, hmap, tau[i]);
  }
  result = instance_type(table, cid, n, s);
  if (n > 8) {
    safe_free(s);
  }

  return result;
}

/*
 * Recursively apply the substitution stored in hmap to tau.
 * - hmap maps already-visited (non-ground) types to their image;
 *   new results are cached in hmap as they are computed
 * - ground types are returned unchanged; a variable not bound
 *   in hmap maps to itself
 */
static type_t type_subst_recur(type_table_t *table, int_hmap_t *hmap, type_t tau) {
  int_hmap_pair_t *p;
  tuple_type_t *tup;
  function_type_t *fun;
  instance_type_t *inst;
  type_t result;

  // if tau is ground, then it's unchanged
  result = tau;
  if (! ground_type(table, tau)) {
    p = int_hmap_find(hmap, tau);
    if (p != NULL) {
      // already visited: reuse the cached image
      result = p->val;
    } else {
      switch (type_kind(table, tau)) {
      case TUPLE_TYPE:
        tup = tuple_type_desc(table, tau);
        result = tuple_type_subst(table, hmap, tup->elem, tup->nelem);
        p = int_hmap_get(hmap, tau);
        assert(p->val < 0);
        p->val = result;
        break;

      case FUNCTION_TYPE:
        fun = function_type_desc(table, tau);
        result = function_type_subst(table, hmap, fun->range, fun->domain, fun->ndom);
        p = int_hmap_get(hmap, tau);
        assert(p->val < 0);
        p->val = result;
        break;

      case INSTANCE_TYPE:
        inst = instance_type_desc(table, tau);
        result = instance_type_subst(table, hmap, inst->cid, inst->param, inst->arity);
        p = int_hmap_get(hmap, tau);
        assert(p->val < 0);
        p->val = result;
        break;

      default:
        // only a type variable can reach here; unbound variables
        // are left unchanged
        assert(is_type_variable(table, tau));
        result = tau;
        break;
      }
    }
  }

  return result;
}


/*
 * Apply a type substitution:
 * v[0 ... n-1] = distinct type variables
 * s[0 ... n-1] = types
 * the function replaces v[i] by s[i] in tau and returns
 * the result.
 */
type_t type_substitution(type_table_t *table, type_t tau, uint32_t n, const type_t v[], const type_t s[]) {
  int_hmap_t hmap;
  int_hmap_pair_t *p;
  uint32_t i;
  type_t result;

  assert(all_distinct_vars(table, n, v));

  // nothing to do when tau is ground
  result = tau;
  if (!
ground_type(table, tau)) { init_int_hmap(&hmap, 0); for (i=0; i<n; i++) { p = int_hmap_get(&hmap, v[i]); assert(p->key == v[i] && p->val < 0); p->val = s[i]; } result = type_subst_recur(table, &hmap, tau); delete_int_hmap(&hmap); } return result; } /* * MATCHING */ /* * Initialize matcher */ void init_type_matcher(type_matcher_t *matcher, type_table_t *types) { uint32_t n; matcher->types = types; init_int_hmap(&matcher->tc, 0); // used default size n = DEF_TYPE_MATCHER_SIZE; assert(n <= MAX_TYPE_MATCHER_SIZE); matcher->var = (type_t *) safe_malloc(n * sizeof(type_t)); matcher->map = (type_t *) safe_malloc(n * sizeof(type_t)); matcher->nvars = 0; matcher->varsize = n; } /* * Make room for more variables */ static void type_matcher_extend(type_matcher_t *matcher) { uint32_t n; n = matcher->varsize; n += (n >> 1); // 50% larger n ++; if (n > MAX_TYPE_MATCHER_SIZE) { out_of_memory(); } matcher->var = (type_t *) safe_realloc(matcher->var, n * sizeof(type_t)); matcher->map = (type_t *) safe_realloc(matcher->map, n * sizeof(type_t)); matcher->varsize = n; } /* * Add a type variable x to matcher->var * - x is mapped to NULL_TYPE */ static void type_matcher_addvar(type_matcher_t *matcher, type_t x) { uint32_t i; assert(is_type_variable(matcher->types, x)); i = matcher->nvars; if (i == matcher->varsize) { type_matcher_extend(matcher); } assert(i < matcher->varsize); matcher->var[i] = x; matcher->map[i] = NULL_TYPE; matcher->nvars = i + 1; } /* * Reset to the empty set */ void reset_type_matcher(type_matcher_t *matcher) { int_hmap_reset(&matcher->tc); matcher->nvars = 0; } /* * Delete all */ void delete_type_matcher(type_matcher_t *matcher) { delete_int_hmap(&matcher->tc); safe_free(matcher->var); safe_free(matcher->map); } /* * Constraint code for (sigma, eq): * - low-order bit = 1 --> equality constraint * - low-order bit = 0 --> type inclusion constraint * - rest of the 32bit integer is sigma */ static inline int32_t mk_constraint_code(type_t sigma, bool eq) { int32_t k; 
assert(0 <= sigma); k = (sigma << 1) | eq; assert(k >= 0); return k; } /* * Check the type of constraint encoded by k */ static inline bool is_eq_constraint(int32_t k) { assert(k >= 0); return (k & 1) != 0; } #ifndef NDEBUG static inline bool is_subtype_constraint(int32_t k) { assert(k >= 0); return (k & 1) == 0; } #endif static inline type_t arg_of_constraint(int32_t k) { assert(k >= 0); return k >> 1; } /* * Check whether constraint codes k1 and k2 are compatible * - at least one of k1 and k2 must be non-negative * - if so return the code for the conjunction of k1 and k2 * - otherwise return -1 */ static int32_t merge_constraints(type_matcher_t *matcher, int32_t k1, int32_t k2) { type_t sigma1, sigma2, sigma; assert(k1 >= 0 || k2 >= 0); if (k1 < 0) return k2; if (k2 < 0) return k1; if (k1 == k2) return k1; sigma1 = arg_of_constraint(k1); sigma2 = arg_of_constraint(k2); if (is_eq_constraint(k1) && is_eq_constraint(k2)) { // k1 says [tau == sigma1] // k2 says [tau == sigma2] assert(sigma1 != sigma2); return -1; } if (is_eq_constraint(k1)) { assert(is_subtype_constraint(k2)); // k1 says [tau == sigma1] // k2 says [tau is a supertype of sigma2] if (is_subtype(matcher->types, sigma2, sigma1)) { return k1; } return -1; } if (is_eq_constraint(k2)) { assert(is_subtype_constraint(k1)); // k1 says [tau is a supertype of sigma1] // k2 says [tau == sigma2] if (is_subtype(matcher->types, sigma1, sigma2)) { return k2; } return -1; } assert(is_subtype_constraint(k1) && is_subtype_constraint(k2)); // k1 says [tau is a supertype of sigma1] // k2 says [tau is a supertype of sigma2] sigma = super_type(matcher->types, sigma1, sigma2); if (sigma != NULL_TYPE) { return mk_constraint_code(sigma, false); // [tau is a supertype of sigma] } return -1; } /* * Get the constraint code for tau * -1 means no constraint on tau yet */ static int32_t type_matcher_get_constraint(type_matcher_t *matcher, type_t tau) { int_hmap_pair_t *p; int32_t k; k = -1; p = int_hmap_find(&matcher->tc, tau); if 
(p != NULL) { k = p->val; } return k; } /* * Set the constraint code for tau to k * k must be a valid constraint code(not -1) */ static void type_matcher_set_constraint(type_matcher_t *matcher, type_t tau, int32_t k) { int_hmap_pair_t *p; assert(k >= 0 && good_type(matcher->types, arg_of_constraint(k))); p = int_hmap_get(&matcher->tc, tau); assert(p->key == tau); p->val = k; } /* * Add a set of constraints: * - a and b must be array of types of the same size * - n = size of these arrays * - eq = constraint type * * Each a[i] should be a type to be matched with b[i] * - if eq is true, we want exact matching * - if eq is false, we want b[i] \subtype of a[i] * * - return false if the matching fails, true otherwise */ static bool match_type_arrays(type_matcher_t *matcher, type_t *a, type_t *b, uint32_t n, bool eq) { uint32_t i; for (i=0; i<n; i++) { if (!type_matcher_add_constraint(matcher, a[i], b[i], eq)) { return false; } } return true; } // check matching between two tuple types static bool match_tuple_types(type_matcher_t *matcher, tuple_type_t *tau, tuple_type_t *sigma, bool eq) { uint32_t n; n = tau->nelem; return n == sigma->nelem && match_type_arrays(matcher, tau->elem, sigma->elem, n, eq); } /* * Check matching between two function types: * - we add equality constraints for the domain types * - we propagate 'eq' for the range */ static bool match_function_types(type_matcher_t *matcher, function_type_t *tau, function_type_t *sigma, bool eq) { uint32_t n; n = tau->ndom; return n == sigma->ndom && match_type_arrays(matcher, tau->domain, sigma->domain, n, true) && type_matcher_add_constraint(matcher, tau->range, sigma->range, eq); } /* * For instance types: we force equality * - e.g., List[X] is a subtype of List[Y] iff (List[X] == List[Y]) iff (X == Y) */ static bool match_instance_types(type_matcher_t *matcher, instance_type_t *tau, instance_type_t *sigma) { assert(tau->cid != sigma->cid || tau->arity == sigma->arity); return tau->cid == sigma->cid && 
match_type_arrays(matcher, tau->param, sigma->param, tau->arity, true); } /* * Add a type constraint: * - both tau and sigma must be valid types defined in matcher->types * (and tau should contain type variables) * - if eq is true the constraint is "tau = sigma" * otherwise it's "tau is a supertype of sigma" * - return false if the set of constraints is inconsistent * - return true otherwise and update the solution */ bool type_matcher_add_constraint(type_matcher_t *matcher, type_t tau, type_t sigma, bool eq) { type_table_t *table; int32_t k1, k2; table = matcher->types; assert(good_type(table, tau) && good_type(table, sigma)); if (eq && ground_type(table, tau)) { return tau == sigma; } switch (type_kind(table, tau)) { case UNUSED_TYPE: assert(false); // should not happen break; case BOOL_TYPE: case INT_TYPE: case BITVECTOR_TYPE: case SCALAR_TYPE: case UNINTERPRETED_TYPE: // tau is a minimal type to (sigma subtype of tau) is the same as tau == sigma assert(! eq); return tau == sigma; case REAL_TYPE: // (sigma subtype of tau) IFF (sigma is int or real) assert(! 
eq && tau == real_id); return sigma == int_id || sigma == real_id; case VARIABLE_TYPE: k1 = type_matcher_get_constraint(matcher, tau); k2 = merge_constraints(matcher, k1, mk_constraint_code(sigma, eq)); if (k2 >= 0) { // no conflict if (k1 != k2) { type_matcher_set_constraint(matcher, tau, k2); if (k1 < 0) { type_matcher_addvar(matcher, tau); } } return true; } break; case TUPLE_TYPE: if (type_kind(table, sigma) == TUPLE_TYPE) { k1 = type_matcher_get_constraint(matcher, tau); k2 = merge_constraints(matcher, k1, mk_constraint_code(sigma, eq)); if (k2 >= 0) { if (k2 == k1) return true; // new constraint on tau encoded in k2 sigma = arg_of_constraint(k2); eq = is_eq_constraint(eq); if (match_tuple_types(matcher, tuple_type_desc(table, tau), tuple_type_desc(table, sigma), eq)) { type_matcher_set_constraint(matcher, tau, k2); return true; } } } break; case FUNCTION_TYPE: if (type_kind(table, sigma) == FUNCTION_TYPE) { k1 = type_matcher_get_constraint(matcher, tau); k2 = merge_constraints(matcher, k1, mk_constraint_code(sigma, eq)); if (k2 >= 0) { if (k1 == k2) return true; // new constraint on tau encoded in k2 sigma = arg_of_constraint(k2); eq = is_eq_constraint(eq); if (match_function_types(matcher, function_type_desc(table, tau), function_type_desc(table, sigma), eq)) { type_matcher_set_constraint(matcher, tau, k2); return true; } } } break; case INSTANCE_TYPE: if (type_kind(table, sigma) == INSTANCE_TYPE) { // we ignore eq here (i.e., do as if eq is true) k1 = type_matcher_get_constraint(matcher, tau); k2 = merge_constraints(matcher, k1, mk_constraint_code(sigma, true)); if (k2 >= 0) { if (k1 == k2) return true; // new constraint on tau sigma = arg_of_constraint(k2); if (match_instance_types(matcher, instance_type_desc(table, tau), instance_type_desc(table, sigma))) { type_matcher_set_constraint(matcher, tau, k2); return true; } } } break; } return false; } /* * Collect the substitution stored in matcher * - this is defined only if the matching worked (i.e., 
add_constraint did not return false) */ void type_matcher_build_subst(type_matcher_t *matcher) { uint32_t i, n; int32_t k; n = matcher->nvars; for (i=0; i<n; i++) { k = type_matcher_get_constraint(matcher, matcher->var[i]); assert(k >= 0); matcher->map[i] = arg_of_constraint(k); } } /* * Apply the matcher's substitution to tau */ type_t apply_type_matching(type_matcher_t *matcher, type_t tau) { return type_substitution(matcher->types, tau, matcher->nvars, matcher->var, matcher->map); } #if 0 /* * Check whether tau matches sigma * - if so build a substitution S, such that S(tau) = sigma * - S is stored in the hash_map subst * * - both tau and sigma must be defined in table. * - subst must be initialized. * * If subst is not empty, then the matching test is relative to the * current S (i.e., the search is for a substitution S' that extends S) */ bool types_match(type_table_t *table, type_t tau, type_t sigma, int_hmap_t *subst) { int_hmap_pair_t *p; type_kind_t sigma_kind; type_kind_t tau_kind; assert(good_type(table, tau) && good_type(table, sigma)); if (ground_type(table, tau)) { return tau == sigma; } p = int_hmap_get(subst, tau); assert(p->key == tau); if (p->val >= 0) { assert(good_type(table, p->val)); // tau is already mapped to p->val by subst return p->val == sigma; } tau_kind = type_kind(table, tau); if (tau_kind == VARIABLE_TYPE) { // success: add [tau := sigma] to hmap p->val = sigma; return true; } sigma_kind = type_kind(table, sigma); if (sigma_kind != tau_kind) { return false; } // recursively check whether the children match switch (type_kind(table, tau)) { case TUPLE_TYPE: if (! match_tuple_types(table, tuple_type_desc(table, tau), tuple_type_desc(table, sigma), subst)) { return false; } break; case FUNCTION_TYPE: if (! match_function_types(table, function_type_desc(table, tau), function_type_desc(table, sigma), subst)) { return false; } break; case INSTANCE_TYPE: if (! 
match_instance_types(table, instance_type_desc(table, tau), instance_type_desc(table, sigma), subst)) { return false; } break; default: assert(false); break; } /* * tau matches sigma: store [tau --> sigma] in subst * we can't reuse p here since the recursive calls may have modified the hash_map */ p = int_hmap_get(subst, tau); assert(p->key == tau && p->val < 0); p->val = sigma; return true; } #endif /* * TYPE NAMES */ /* * Assign name to type i. * - the previous mapping of name to other types (if any) is hidden. * - name must have a reference counter attached to it (cf. clone_string * in memalloc.h). */ void set_type_name(type_table_t *table, type_t i, char *name) { if (table->name[i] == NULL) { table->name[i] = name; string_incref(name); } stbl_add(&table->stbl, name, i); string_incref(name); } /* * Get type mapped to the name (or NULL_TYPE) */ type_t get_type_by_name(type_table_t *table, const char *name) { // NULL_TYPE = -1 and stbl_find returns -1 if name is absent return stbl_find(&table->stbl, name); } /* * Remove a type name. */ void remove_type_name(type_table_t *table, const char *name) { stbl_remove(&table->stbl, name); } /* * Remove the name of t */ void clear_type_name(type_table_t *table, type_t t) { char *name; name = table->name[t]; if (name != NULL) { if (stbl_find(&table->stbl, name) == t) { stbl_remove(&table->stbl, name); } table->name[t] = NULL; string_decref(name); } } /* * CARDINALITY */ /* * Approximate cardinality of tau[0] x ... x tau[n-1] * - returns the same value as card_of(tuple_type(tau[0] ... tau[n-1])) but does not * construct the tuple type. 
*/ uint32_t card_of_type_product(type_table_t *table, uint32_t n, const type_t *tau) { uint64_t card; card = type_card_product(table, n, tau); if (card > UINT32_MAX) { card = UINT32_MAX; } assert(1 <= card && card <= UINT32_MAX); return (uint32_t) card; } /* * Approximate cardinality of the domain and range of a function type tau */ uint32_t card_of_domain_type(type_table_t *table, type_t tau) { function_type_t *d; d = function_type_desc(table, tau); return card_of_type_product(table, d->ndom, d->domain); } uint32_t card_of_range_type(type_table_t *table, type_t tau) { return type_card(table, function_type_range(table, tau)); } /* * Check whether a function type has a finite domain or range * - tau must be a function type. */ bool type_has_finite_domain(type_table_t *table, type_t tau) { function_type_t *fun; uint32_t flag; fun = function_type_desc(table, tau); flag = type_flags_conjunct(table, fun->ndom, fun->domain); return flag & TYPE_IS_FINITE_MASK; } bool type_has_finite_range(type_table_t *table, type_t tau) { return is_finite_type(table, function_type_range(table, tau)); } /* * COMMON SUPERTYPE */ /* * Try to compute sup(tau1, tau2) cheaply * - return UNKNOWN_TYPE if that fails */ #define UNKNOWN_TYPE (-2) static type_t cheap_sup(type_table_t *table, type_t tau1, type_t tau2) { assert(good_type(table, tau1) && good_type(table, tau2)); if (tau1 == tau2) { return tau1; } if ((tau1 == int_id && tau2 == real_id) || (tau1 == real_id && tau2 == int_id)) { return real_id; } switch (table->kind[tau1]) { case TUPLE_TYPE: if (table->kind[tau2] != TUPLE_TYPE || tuple_type_arity(table, tau1) != tuple_type_arity(table, tau2)) { return NULL_TYPE; } break; case FUNCTION_TYPE: if (table->kind[tau2] != FUNCTION_TYPE || function_type_arity(table, tau1) != function_type_arity(table, tau2)) { return NULL_TYPE; } break; default: return NULL_TYPE; } return UNKNOWN_TYPE; } /* * Construct sup of two tuple types of equal arity n: * - first tuple components are a[0] .... 
a[n-1] * - second tuple components are b[0] ... b[n-1] * The result is either NULL_TYPE or (tuple s[0] ... s[n-1]) * where s[i] = sup(a[i], b[i]). */ static type_t sup_tuple_types(type_table_t *table, uint32_t n, type_t *a, type_t *b) { type_t buffer[8]; type_t *s; type_t aux; uint32_t i; /* * For intermediate results, we use a buffer of 8 types. * That should be enough in most cases. Otherwise * we allocate a larger buffer s. */ s = buffer; if (n > 8) { s = (type_t *) safe_malloc(n * sizeof(type_t)); } for (i=0; i<n; i++) { aux = super_type(table, a[i], b[i]); if (aux == NULL_TYPE) goto done; s[i] = aux; } aux = tuple_type(table, n, s); done: if (n > 8) { safe_free(s); } return aux; } /* * Check whether a[0 ... n-1] and b[0 ... n-1] * are equal (i.e., same function domain). */ static bool equal_type_arrays(uint32_t n, type_t *a, type_t *b) { uint32_t i; for (i=0; i<n; i++) { if (a[i] != b[i]) return false; } return true; } /* * Construct sup of two function types sigma1 and sigma2 of * equal domain and arity. * - n = arity * - a[0] ... a[n-1] = domain type * - tau1 = range of sigma1 * - tau2 = range of sigma2 * * The result is either the function type [a[0] ... a[n-1] --> sup(tau1, tau2)] * or NULL_TYPE. */ static type_t sup_fun_types(type_table_t *table, uint32_t n, type_t *a, type_t tau1, type_t tau2) { type_t aux; aux = super_type(table, tau1, tau2); if (aux != NULL_TYPE) { aux = function_type(table, aux, n, a); } return aux; } /* * Compute the smallest supertype of tau1 and tau2. Use the cheap * method first. If that fails, compute the result and keep the result * in the internal sup_tbl cache. */ type_t super_type(type_table_t *table, type_t tau1, type_t tau2) { tuple_type_t *tup1, *tup2; function_type_t *fun1, *fun2; int_hmap2_t *sup_tbl; int_hmap2_rec_t *r; type_t aux; assert(good_type(table, tau1) && good_type(table, tau2)); aux = cheap_sup(table, tau1, tau2); if (aux == UNKNOWN_TYPE) { /* * Cheap_sup failed. 
* Check whether sup(tau1, tau2) is already in the cache. * If it's not do the computation and add the * result to the cache. */ // Normalize. We want tau1 < tau2 if (tau1 > tau2) { aux = tau1; tau1 = tau2; tau2 = aux; } assert(tau1 < tau2); sup_tbl = get_sup_table(table); r = int_hmap2_find(sup_tbl, tau1, tau2); if (r != NULL) { aux = r->val; } else { /* * The result is not in the cache. */ if (table->kind[tau1] == TUPLE_TYPE) { tup1 = tuple_type_desc(table, tau1); tup2 = tuple_type_desc(table, tau2); assert(tup1->nelem == tup2->nelem); aux = sup_tuple_types(table, tup1->nelem, tup1->elem, tup2->elem); } else { fun1 = function_type_desc(table, tau1); fun2 = function_type_desc(table, tau2); assert(fun1->ndom == fun2->ndom); aux = NULL_TYPE; if (equal_type_arrays(fun1->ndom, fun1->domain, fun2->domain)) { aux = sup_fun_types(table, fun1->ndom, fun1->domain, fun1->range, fun2->range); } } int_hmap2_add(sup_tbl, tau1, tau2, aux); } } assert(aux == NULL_TYPE || good_type(table, aux)); return aux; } /* * COMMON SUBTYPE */ /* * Try to compute inf(tau1, tau2) cheaply. * Return UNKNOWN_TYPE if that fails. */ static type_t cheap_inf(type_table_t *table, type_t tau1, type_t tau2) { assert(good_type(table, tau1) && good_type(table, tau2)); if (tau1 == tau2) { return tau1; } if ((tau1 == int_id && tau2 == real_id) || (tau1 == real_id && tau2 == int_id)) { return int_id; } switch (table->kind[tau1]) { case TUPLE_TYPE: if (table->kind[tau2] != TUPLE_TYPE || tuple_type_arity(table, tau1) != tuple_type_arity(table, tau2)) { return NULL_TYPE; } break; case FUNCTION_TYPE: if (table->kind[tau2] != FUNCTION_TYPE || function_type_arity(table, tau1) != function_type_arity(table, tau2)) { return NULL_TYPE; } break; default: return NULL_TYPE; } return UNKNOWN_TYPE; } /* * Construct inf of two tuple types of equal arity n: * - first tuple components are a[0] .... a[n-1] * - second tuple components are b[0] ... b[n-1] * The result is either NULL_TYPE or (tuple s[0] ... 
s[n-1]) * where s[i] = inf(a[i], b[i]). */ static type_t inf_tuple_types(type_table_t *table, uint32_t n, type_t *a, type_t *b) { type_t buffer[8]; type_t *s; type_t aux; uint32_t i; /* * For intermediate results, we use a buffer of 8 types. * That should be enough in most cases. Otherwise * we allocate a larger buffer s. */ s = buffer; if (n > 8) { s = (type_t *) safe_malloc(n * sizeof(type_t)); } for (i=0; i<n; i++) { aux = inf_type(table, a[i], b[i]); if (aux == NULL_TYPE) goto done; s[i] = aux; } aux = tuple_type(table, n, s); done: if (n > 8) { safe_free(s); } return aux; } /* * Construct inf of two function types sigma1 and sigma2 of * equal domain and arity. * - n = arity * - a[0] ... a[n-1] = domain type * - tau1 = range of sigma1 * - tau2 = range of sigma2 * * The result is either the function type [a[0] ... a[n-1] --> inf(tau1, tau2)] * or NULL_TYPE. */ static type_t inf_fun_types(type_table_t *table, uint32_t n, type_t *a, type_t tau1, type_t tau2) { type_t aux; aux = inf_type(table, tau1, tau2); if (aux != NULL_TYPE) { aux = function_type(table, aux, n, a); } return aux; } /* * Compute the largest common subtype of tau1 and tau2. Use the cheap * method first. If that fails, compute the result and keep the result * in the internal inf_tbl cache. */ type_t inf_type(type_table_t *table, type_t tau1, type_t tau2) { tuple_type_t *tup1, *tup2; function_type_t *fun1, *fun2; int_hmap2_t *inf_tbl; int_hmap2_rec_t *r; type_t aux; assert(good_type(table, tau1) && good_type(table, tau2)); aux = cheap_inf(table, tau1, tau2); if (aux == UNKNOWN_TYPE) { /* * Cheap_inf failed. * Check whether inf(tau1, tau2) is already in the cache. * If it's not do the computation and add the * result to the cache. */ // Normalize. We want tau1 < tau2 if (tau1 > tau2) { aux = tau1; tau1 = tau2; tau2 = aux; } assert(tau1 < tau2); inf_tbl = get_inf_table(table); r = int_hmap2_find(inf_tbl, tau1, tau2); if (r != NULL) { aux = r->val; } else { /* * The result is not in the cache. 
*/ if (table->kind[tau1] == TUPLE_TYPE) { tup1 = tuple_type_desc(table, tau1); tup2 = tuple_type_desc(table, tau2); assert(tup1->nelem == tup2->nelem); aux = inf_tuple_types(table, tup1->nelem, tup1->elem, tup2->elem); } else { fun1 = function_type_desc(table, tau1); fun2 = function_type_desc(table, tau2); assert(fun1->ndom == fun2->ndom); aux = NULL_TYPE; if (equal_type_arrays(fun1->ndom, fun1->domain, fun2->domain)) { aux = inf_fun_types(table, fun1->ndom, fun1->domain, fun1->range, fun2->range); } } int_hmap2_add(inf_tbl, tau1, tau2, aux); } } assert(aux == NULL_TYPE || good_type(table, aux)); return aux; } /* * MAXIMAL SUPERTYPE */ /* * Try to cheaply compute the maximal super type of tau * - return NULL_TYPE if that fails */ static type_t cheap_max_super_type(type_table_t *table, type_t tau) { type_t sigma; sigma = NULL_TYPE; if (is_maxtype(table, tau)) { sigma = tau; } else if (tau == int_id) { sigma = real_id; } return sigma; } /* * Maximal supertype of a tuple type */ static type_t max_tuple_super_type(type_table_t *table, tuple_type_t *tup) { type_t buffer[8]; type_t *s; uint32_t i, n; type_t tau; n = tup->nelem; s = buffer; if (n > 8) { s = safe_malloc(n * sizeof(type_t)); } for (i=0; i<n; i++) { s[i] = max_super_type(table, tup->elem[i]); } tau = tuple_type(table, n, s); if (n > 8) { safe_free(s); } return tau; } /* * Maximal supertype of a function type */ static type_t max_function_super_type(type_table_t *table, function_type_t *fun) { type_t tau; tau = max_super_type(table, fun->range); return function_type(table, tau, fun->ndom, fun->domain); } /* * Build the largest type that's a supertype of tau */ type_t max_super_type(type_table_t *table, type_t tau) { int_hmap_t *max_tbl; int_hmap_pair_t *r; type_t aux; assert(good_type(table, tau)); aux = cheap_max_super_type(table, tau); if (aux == NULL_TYPE) { max_tbl = get_max_table(table); r = int_hmap_find(max_tbl, tau); if (r != NULL) { aux = r->val; } else { // max is not in the cache if 
(table->kind[tau] == TUPLE_TYPE) { aux = max_tuple_super_type(table, tuple_type_desc(table, tau)); } else { aux = max_function_super_type(table, function_type_desc(table,tau)); } int_hmap_add(max_tbl, tau, aux); } } assert(good_type(table, aux)); return aux; } /* * SUBTYPE AND COMPATIBILITY */ /* * Check whether tau1 is a subtype if tau2. * * Side effects: this is implemented using super_type so this may create * new types in the table. */ bool is_subtype(type_table_t *table, type_t tau1, type_t tau2) { return super_type(table, tau1, tau2) == tau2; } /* * Check whether tau1 and tau2 are compatible. * * Side effects: use the super_type function. So this may create new * types in the table. */ bool compatible_types(type_table_t *table, type_t tau1, type_t tau2) { return super_type(table, tau1, tau2) != NULL_TYPE; } /* * MACRO CONSTRUCTORS */ /* * NOTES * * 1) macro names have the same scoping mechanism as * term and type names. If a macro of a given name is * added to the table, and name refers to an existing * macro then the current mapping is hidden. It will be * restored after a call to remove_type_macro_name. * * 2) the implementation uses character strings with reference * counting (cf. refcount_strings.h). The parameter 'name' * in add_type_macro and add_type_constructor must be * the result of 'clone_string'. */ /* * Add a macro descriptor: * - name = macro name * - n = arity. It must be no more than TYPE_MACRO_MAX_ARITY * - vars = array of n type variables (must be all distinct) * - body = type */ int32_t add_type_macro(type_table_t *table, char *name, uint32_t n, const type_t *vars, type_t body) { type_mtbl_t *mtbl; type_macro_t *d; int32_t i; mtbl = get_macro_table(table); assert(body != NULL_TYPE); i = allocate_macro_id(mtbl); d = new_descriptor(name, n, vars, body); assert(! has_int_tag(d)); mtbl->data[i] = d; stbl_add(&mtbl->stbl, name, i); string_incref(name); return i; } /* * Add an uninterpreted type constructor: * - name = macro name * - n = arity. 
It must be no more than TYPE_MACRO_MAX_ARITY */ int32_t add_type_constructor(type_table_t *table, char *name, uint32_t n) { type_mtbl_t *mtbl; type_macro_t *d; int32_t i; mtbl = get_macro_table(table); i = allocate_macro_id(mtbl); d = new_constructor(name, n); assert(! has_int_tag(d)); mtbl->data[i] = d; stbl_add(&mtbl->stbl, name, i); string_incref(name); return i; } /* * Get a macro id of the given name * - return -1 if there's no macro with this name */ int32_t get_type_macro_by_name(type_table_t *table, const char *name) { type_mtbl_t *mtbl; int32_t id; id = -1; mtbl = table->macro_tbl; if (mtbl != NULL) { id = stbl_find(&mtbl->stbl, name); } return id; } /* * Get the descriptor for the given id * - return NULL if id is not valid (including if it refers to a deleted macro) */ type_macro_t *type_macro(type_table_t *table, int32_t id) { type_mtbl_t *mtbl; type_macro_t *macro; mtbl = table->macro_tbl; macro = NULL; if (mtbl != NULL && good_type_macro(mtbl, id)) { macro = mtbl->data[id]; } return macro; } /* * Remove the current mapping of 'name' to a macro id * - no change if 'name' does not refer to any macro * - otherwise, the current reference for 'name' is removed * and the previous mapping is restored (if any). */ void remove_type_macro_name(type_table_t *table, const char *name) { type_mtbl_t *mtbl; mtbl = table->macro_tbl; if (mtbl != NULL) { stbl_remove(&mtbl->stbl, name); } } /* * Keep alive function used in delete_type_macro: * - aux is a pointer to an integer variable and * *aux = id of the macro to delete * - r is a record in the tuple cache * - r must be deleted if its first element r->key[0] is equal to id */ static bool keep_cached_tuple_alive(void *aux, tuple_hmap_rec_t *r) { assert(r->arity > 1); return r->key[0] != *((int32_t *) aux); } /* * Remove macro of the given id * - id must be a valid macro index * - the macro name is deleted (from the symbol table) * - all instances of this macro are also deleted. 
*/ void delete_type_macro(type_table_t *table, int32_t id) { type_mtbl_t *mtbl; type_macro_t *macro; mtbl = table->macro_tbl; assert(mtbl != NULL && good_type_macro(mtbl, id)); macro = mtbl->data[id]; stbl_remove(&mtbl->stbl, macro->name); tuple_hmap_gc(&mtbl->cache, &id, keep_cached_tuple_alive); free_macro_id(mtbl, id); } /* * Macro instance: apply a macro to the given actual parameters * - id = macro id * - n = number of actuals * - actual = array of n types (actual parameters) * - each parameter must be a valid type * - n must be equal to the macro arity. * * If the macro is a type constructor (i.e., body = NULL_TYPE) then * a new instance is constructed. * * If the macro is a not a type constructor: * - Check whether this instance already exists in mtbl->hmap. * - If so, the instance is returned, otherwise, the * instance is constructed by substituting variables in body with * the actuals. The result is stored in mtbl->hmap. */ type_t instantiate_type_macro(type_table_t *table, int32_t id, uint32_t n, const type_t *actual) { type_mtbl_t *mtbl; int32_t aux[10]; int32_t *key; tuple_hmap_rec_t *r; type_macro_t *d; bool new; uint32_t i; type_t result; // id is a good macro with arity n assert(type_macro(table, id)->arity == n); /* * By default, we use a buffer of 10 integers to store id + actuals * If more is needed, a larger array is allocated here. */ key = aux; if (n > 9) { key = (int32_t *) safe_malloc((n+1) * sizeof(int32_t)); } key[0] = id; for (i=0; i<n; i++) { key[1 + i] = actual[i]; } mtbl = table->macro_tbl; assert(mtbl != NULL); d = mtbl->data[id]; assert(d->arity == n); if (d->body == NULL_TYPE) { // type constructor: new instance result = instance_type(table, id, n, actual); } else { // check the cache r = tuple_hmap_get(&mtbl->cache, n+1, key, &new); result = r->value; if (new) { result = type_substitution(table, d->body, n, d->vars, actual); assert(tuple_hmap_find(&mtbl->cache, n+1, key) == r); // i.e. 
r is still valid r->value = result; } } if (n > 9) { safe_free(key); } return result; } /* * GARBAGE COLLECTION */ /* * Remove type i from the hash-consing table */ static void erase_hcons_type(type_table_t *table, type_t i) { uint32_t k; switch (table->kind[i]) { case BITVECTOR_TYPE: k = hash_bvtype(table->desc[i].integer); break; case VARIABLE_TYPE: k = hash_typevar(table->desc[i].integer); break; case TUPLE_TYPE: k = hash_tupletype(table->desc[i].ptr); break; case FUNCTION_TYPE: k = hash_funtype(table->desc[i].ptr); break; case INSTANCE_TYPE: k = hash_instancetype(table->desc[i].ptr); break; default: return; } int_htbl_erase_record(&table->htbl, k, i); } /* * Mark all descendants of i whose ids are less than ptr. * - i must be a marked type (and not already deleted) * * NOTE: we use a recursive function to propagate the marks. * That should be safe as there's little risk of stack overflow. */ static void mark_reachable_types(type_table_t *table, type_t ptr, type_t i); // mark i if it's not marked already then explore its children if i < ptr static void mark_and_explore(type_table_t *table, type_t ptr, type_t i) { if (! 
type_is_marked(table, i)) { type_table_set_gc_mark(table, i); if (i < ptr) { mark_reachable_types(table, ptr, i); } } } static void mark_reachable_types(type_table_t *table, type_t ptr, type_t i) { tuple_type_t *tup; function_type_t *fun; instance_type_t *inst; uint32_t n, j; assert(type_is_marked(table, i) && table->kind[i] != UNUSED_TYPE); switch (table->kind[i]) { case TUPLE_TYPE: tup = table->desc[i].ptr; n = tup->nelem; for (j=0; j<n; j++) { mark_and_explore(table, ptr, tup->elem[j]); } break; case FUNCTION_TYPE: fun = table->desc[i].ptr; mark_and_explore(table, ptr, fun->range); n = fun->ndom; for (j=0; j<n; j++) { mark_and_explore(table, ptr, fun->domain[j]); } break; case INSTANCE_TYPE: inst = table->desc[i].ptr; n = inst->arity; for (j=0; j<n; j++) { mark_and_explore(table, ptr, inst->param[j]); } break; default: break; } } /* * Propagate the marks: * - on entry: all roots are marked * - on exit: every type reachable from a root is marked */ static void mark_live_types(type_table_t *table) { uint32_t i, n; n = table->nelems; for (i=0; i<n; i++) { if (type_is_marked(table, i)) { mark_reachable_types(table, i, i); } } } /* * Iterator to mark types present in the symbol table * - aux must be a pointer to the type table * - r = live record in the symbol table so r->value * is the id of a type to preserve. */ static void mark_symbol(void *aux, const stbl_rec_t *r) { type_table_set_gc_mark(aux, r->value); } /* * Filter to remove dead types from the symbol table. * - aux must be a pointer to the type table * - r = record in the symbol table: if the function returns true, * r will be finalized then removed from the symbol table. 
*/ static bool dead_type_symbol(void *aux, const stbl_rec_t *r) { return !type_is_marked(aux, r->value); } /* * Keep-alive function for the sup/inf caches * - record (k0, k1 --> x) is kept in the caches * if k0, k1, and x haven't been deleted * - aux is a pointer to the type table */ static bool keep_in_cache(void *aux, int_hmap2_rec_t *r) { return good_type(aux, r->k0) && good_type(aux, r->k1) && good_type(aux, r->val); } /* * Keep-alive function for the max cache * - record (k --> x) is kept if k and x haven't been deleted */ static bool keep_in_max_table(void *aux, const int_hmap_pair_t *r) { return good_type(aux, r->key) && good_type(aux, r->val); } /* * Keep-alive function for the macro instance cache * - aux is a pointer to the type table * - record r->key is an array of n integers * r->key[0] = macro id * r->key[1 ... n] = types * r->val = type * - the record is kept if all types are good */ static bool keep_in_tuple_cache(void *aux, tuple_hmap_rec_t *r) { uint32_t i, n; if (! good_type(aux, r->value)) return false; n = r->arity; assert(n > 1); for (i=1; i<n; i++) { if (! good_type(aux, r->key[i])) return false; } return true; } /* * Call the garbage collector: * - delete every type not reachable from a root * - if keep_named is true, all named types (reachable from the symbol table) * are preserved. Otherwise, all live types are marked and all references * to dead types are remove from the symbol table. 
* - cleanup the caches * - then clear all the marks */ void type_table_gc(type_table_t *table, bool keep_named) { uint32_t i, n; // mark every type present in the symbol table if (keep_named) { stbl_iterate(&table->stbl, table, mark_symbol); } // mark the three predefined types type_table_set_gc_mark(table, bool_id); type_table_set_gc_mark(table, int_id); type_table_set_gc_mark(table, real_id); // propagate the marks mark_live_types(table); // remove unmarked types from the symbol table if (!keep_named) { stbl_remove_records(&table->stbl, table, dead_type_symbol); } // delete every unmarked type n = table->nelems; for (i=0; i<n; i++) { if (! type_is_marked(table, i)) { erase_hcons_type(table, i); erase_type(table, i); } type_table_clr_gc_mark(table, i); } // cleanup the inf/sup caches if they exist if (table->sup_tbl != NULL) { int_hmap2_gc(table->sup_tbl, table, keep_in_cache); } if (table->inf_tbl != NULL) { int_hmap2_gc(table->inf_tbl, table, keep_in_cache); } // cleanup the max cache if (table->max_tbl != NULL) { int_hmap_remove_records(table->max_tbl, table, keep_in_max_table); } // cleanup the macro table cache too if (table->macro_tbl != NULL) { tuple_hmap_gc(&table->macro_tbl->cache, table, keep_in_tuple_cache); } }
/* * The Yices SMT Solver. Copyright 2014 SRI International. * * This program may only be used subject to the noncommercial end user * license agreement which is downloadable along with this program. */
t-init_realloc_clear.c
#include <stdlib.h> #include <stdio.h> #include <gmp.h> #include "flint.h" #include "nmod_poly.h" #include "ulong_extras.h" int main(void) { int i; FLINT_TEST_INIT(state); flint_printf("init/init2/realloc/clear...."); fflush(stdout); for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a; mp_limb_t n = n_randtest_not_zero(state); nmod_poly_init2(a, n, n_randint(state, 100)); nmod_poly_clear(a); } for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a; mp_limb_t n = n_randtest_not_zero(state); nmod_poly_init2(a, n, n_randint(state, 100)); nmod_poly_realloc(a, n_randint(state, 100)); nmod_poly_realloc(a, n_randint(state, 100)); nmod_poly_clear(a); } for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a; mp_limb_t n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_randtest(a, state, n_randint(state, 100)); nmod_poly_clear(a); } FLINT_TEST_CLEANUP(state); flint_printf("PASS\n"); return 0; }
/* Copyright (C) 2010 William Hart This file is part of FLINT. FLINT is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License (LGPL) as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. See <https://www.gnu.org/licenses/>. */
monomial_to_newton.c
#include <gmp.h>
#include "flint.h"
#include "fmpz.h"
#include "fmpz_poly.h"

/*
 * Convert the length-n coefficient vector poly, in place, from the
 * monomial basis to the Newton basis with nodes roots[0], ..., roots[n-2]
 * (i.e. the basis 1, (x-r0), (x-r0)(x-r1), ...), as suggested by the
 * function name.
 *
 * Each outer pass folds one root into the representation via the
 * Horner-style update poly[j] += poly[j+1] * roots[i].  The inner loop
 * MUST run with j descending: at step j it reads poly[j+1], which was
 * already updated earlier in the same pass (j+1 is visited before j).
 * O(n^2) fmpz_addmul operations in total.
 */
void
_fmpz_poly_monomial_to_newton(fmpz * poly, const fmpz * roots, slong n)
{
    slong i, j;

    for (i = 0; i < n - 1; i++)
        for (j = n - 2; j >= i; j--)
            fmpz_addmul(poly + j, poly + j + 1, roots + i);
}
/* Copyright (C) 2012 Fredrik Johansson This file is part of FLINT. FLINT is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License (LGPL) as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. See <https://www.gnu.org/licenses/>. */
dune
(library (name foo))
protocol_client_context.ml
(* Lift the protocol implementation into the shell environment and expose
   its hash. *)
module Lifted_protocol = struct
  include Environment.Lift (Protocol)

  let hash = Protocol.hash
end

(* Block RPC services specialized to this protocol. *)
module Alpha_block_services =
  Block_services.Make (Lifted_protocol) (Lifted_protocol)

(** Client RPC context *)
class type rpc_context = object
  inherit Tezos_rpc.Context.generic

  inherit
    [Shell_services.chain * Shell_services.block] Environment.RPC_context.simple
end

(** The class [wrap_rpc_context] is a wrapper class used by the proxy mode
    clients. From a general-purpose Tezos_rpc.Context.generic [t], the class
    is augmented with shell services to provide RPC calls that are
    protocol-dependent. *)
class wrap_rpc_context (t : Tezos_rpc.Context.generic) : rpc_context =
  object
    method base : Uri.t = t#base

    method generic_media_type_call = t#generic_media_type_call

    (* Delegate plain service calls to the wrapped context. *)
    method call_service
        : 'm 'p 'q 'i 'o.
          (([< Resto.meth] as 'm), unit, 'p, 'q, 'i, 'o) Tezos_rpc.Service.t ->
          'p -> 'q -> 'i -> 'o tzresult Lwt.t =
      t#call_service

    (* Delegate streamed service calls to the wrapped context. *)
    method call_streamed_service
        : 'm 'p 'q 'i 'o.
          (([< Resto.meth] as 'm), unit, 'p, 'q, 'i, 'o) Tezos_rpc.Service.t ->
          on_chunk:('o -> unit) ->
          on_close:(unit -> unit) ->
          'p -> 'q -> 'i -> (unit -> unit) tzresult Lwt.t =
      t#call_streamed_service

    (** Abstracts variables <chain_id> and <block_id> in protocol RPCs
        prefixed by "/chains/<chain_id>/blocks/<block_id>/...". *)
    inherit
      [Shell_services.chain, Shell_services.block] Environment.proto_rpc_context
        (t :> Tezos_rpc.Context.t)
        Shell_services.Blocks.path
  end

(** The class type [full] allows to create contexts that are explicitly used
    by low-level shell functions, while containing various information (I/O
    services, RPCs...). Then, depending on the usage, the type may be coerced
    into one of its following ascendants to serve for explicit operations on
    blocks, chain or daemon for instance. *)
class type full = object
  (** The class Client_context.full provides I/O services for the client,
      the wallet, etc. *)
  inherit Client_context.full

  (** Base interface provided to call RPCs, i.e., communication with the
      node.
      A client context is defined by mapping all RPCs protocol-generic to a
      specific protocol. *)
  inherit
    [Shell_services.chain * Shell_services.block] Environment.RPC_context.simple

  (** Protocol RPCs exposed through the environment (using an additional
      chainpath). *)
  inherit
    [Shell_services.chain, Shell_services.block] Environment.proto_rpc_context
end

(** From a [Client_context.full], the class allows to call RPCs from the
    node and those defined by the protocol. *)
class wrap_full (t : Client_context.full) : full =
  object
    inherit Client_context.proxy_context t

    inherit
      [Shell_services.chain, Shell_services.block] Environment.proto_rpc_context
        (t :> Tezos_rpc.Context.t)
        Shell_services.Blocks.path
  end

(* Shadow the generic [register_error_kind]: prefix every error id with
   "client.<protocol name>." before registering. *)
let register_error_kind category ~id ~title ~description ?pp encoding
    from_error to_error =
  let id = "client." ^ Protocol.name ^ "." ^ id in
  register_error_kind
    category
    ~id
    ~title
    ~description
    ?pp
    encoding
    from_error
    to_error

(** Initialization calls that run on start-up. Register the various protocol
    encodings. *)
let () =
  let open Data_encoding.Registration in
  register Protocol.Alpha_context.Lazy_storage.encoding ;
  register ~pp:Protocol.Alpha_context.Fitness.pp
  @@ Protocol.Alpha_context.Fitness.encoding ;
  (* These encodings are missing a def field which we add before registering
     them. These defs should be moved inside their encodings in the protocol
     code. *)
  (* [def id ids] builds the dotted name "<protocol>.<id>.<ids...>" and
     attaches it to [encoding]. *)
  let def id ids ?title ?description encoding =
    Data_encoding.def
      (String.concat "." (Protocol.name :: id :: ids))
      ?title
      ?description
      encoding
  in
  register @@ def "parameters" [] Protocol.Parameters_repr.encoding ;
  register ~pp:Protocol.Alpha_context.Tez.pp
  @@ def "tez" [] Protocol.Alpha_context.Tez.encoding ;
  register ~pp:Protocol.Alpha_context.Timestamp.pp
  @@ def "timestamp" [] Protocol.Alpha_context.Timestamp.encoding ;
  register ~pp:Protocol.Alpha_context.Raw_level.pp
  @@ def "raw_level" [] Protocol.Alpha_context.Raw_level.encoding ;
  register @@ def "vote" ["ballot"] Protocol.Alpha_context.Vote.ballot_encoding ;
  register
  @@ def "vote" ["ballots"] Protocol.Alpha_context.Vote.ballots_encoding ;
  register
  @@ def "vote" ["listings"] Protocol.Alpha_context.Vote.listings_encoding ;
  register @@ def "seed" [] Protocol.Alpha_context.Seed.seed_encoding ;
  register ~pp:Protocol.Alpha_context.Gas.pp
  @@ def "gas" [] Protocol.Alpha_context.Gas.encoding ;
  register ~pp:Protocol.Alpha_context.Gas.pp_cost
  @@ def "gas" ["cost"] Protocol.Alpha_context.Gas.cost_encoding ;
  register @@ def "script" [] Protocol.Alpha_context.Script.encoding ;
  register @@ def "script" ["expr"] Protocol.Alpha_context.Script.expr_encoding ;
  register @@ def "script" ["prim"] Protocol.Alpha_context.Script.prim_encoding ;
  register
  @@ def "script" ["lazy_expr"] Protocol.Alpha_context.Script.lazy_expr_encoding ;
  register
  @@ def "script" ["loc"] Protocol.Alpha_context.Script.location_encoding ;
  register ~pp:Protocol.Alpha_context.Contract.pp
  @@ def "contract" [] Protocol.Alpha_context.Contract.encoding ;
  register
  @@ def
       "receipt"
       ["balance_updates"]
       Protocol.Alpha_context.Receipt.balance_updates_encoding ;
  register ~pp:Protocol.Alpha_context.Level.pp_full
  @@ def "level" [] Protocol.Alpha_context.Level.encoding ;
  register @@ def "operation" [] Protocol.Alpha_context.Operation.encoding ;
  register
  @@ def
       "operation"
       ["contents"]
       Protocol.Alpha_context.Operation.contents_encoding ;
  register
  @@ def
       "operation"
       ["contents_list"]
       Protocol.Alpha_context.Operation.contents_list_encoding ;
  register
  @@ def
       "operation"
       ["protocol_data"]
       Protocol.Alpha_context.Operation.protocol_data_encoding ;
  register
  @@ def "operation" ["raw"] Protocol.Alpha_context.Operation.raw_encoding ;
  register
  @@ def
       "operation"
       ["internal"]
       Protocol.Apply_internal_results.internal_operation_encoding ;
  register
  @@ def
       "operation"
       ["unsigned"]
       Protocol.Alpha_context.Operation.unsigned_encoding ;
  register ~pp:Protocol.Alpha_context.Period.pp
  @@ def "period" [] Protocol.Alpha_context.Period.encoding ;
  register ~pp:Protocol.Alpha_context.Cycle.pp
  @@ def "cycle" [] Protocol.Alpha_context.Cycle.encoding ;
  register @@ def "constants" [] Protocol.Alpha_context.Constants.encoding ;
  register
  @@ def "constants" ["fixed"] Protocol.Alpha_context.Constants.fixed_encoding ;
  register
  @@ def
       "constants"
       ["parametric"]
       Protocol.Alpha_context.Constants.Parametric.encoding ;
  register @@ def "nonce" [] Protocol.Alpha_context.Nonce.encoding ;
  register
  @@ def "block_header" [] Protocol.Alpha_context.Block_header.encoding ;
  register
  @@ def
       "block_header"
       ["unsigned"]
       Protocol.Alpha_context.Block_header.unsigned_encoding ;
  register
  @@ def
       "block_header"
       ["raw"]
       Protocol.Alpha_context.Block_header.raw_encoding ;
  register
  @@ def
       "block_header"
       ["contents"]
       Protocol.Alpha_context.Block_header.contents_encoding ;
  register
  @@ def
       "block_header"
       ["shell_header"]
       Protocol.Alpha_context.Block_header.shell_header_encoding ;
  register
  @@ def
       "block_header"
       ["protocol_data"]
       Protocol.Alpha_context.Block_header.protocol_data_encoding ;
  register ~pp:Protocol.Alpha_context.Voting_period.pp
  @@ def "voting_period" [] Protocol.Alpha_context.Voting_period.encoding ;
  register
  @@ def
       "voting_period"
       ["kind"]
       Protocol.Alpha_context.Voting_period.kind_encoding ;
  register
  @@ def
       "errors"
       []
       ~description:
         "The full list of RPC errors would be too long to include.It is\n\
          available through the RPC `/errors` (GET)."
       error_encoding
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. <contact@tezos.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
gButton.ml
(**************************************************************************)
(*                Lablgtk                                                 *)
(*                                                                        *)
(*    This program is free software; you can redistribute it              *)
(*    and/or modify it under the terms of the GNU Library General         *)
(*    Public License as published by the Free Software Foundation         *)
(*    version 2, with the exception described in file COPYING which       *)
(*    comes with the library.                                             *)
(*                                                                        *)
(*    This program is distributed in the hope that it will be useful,     *)
(*    but WITHOUT ANY WARRANTY; without even the implied warranty of      *)
(*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the       *)
(*    GNU Library General Public License for more details.                *)
(*                                                                        *)
(*    You should have received a copy of the GNU Library General          *)
(*    Public License along with this program; if not, write to the        *)
(*    Free Software Foundation, Inc., 59 Temple Place, Suite 330,         *)
(*    Boston, MA 02111-1307  USA                                          *)
(*                                                                        *)
(*                                                                        *)
(**************************************************************************)

(* $Id$ *)

open Gaux
open Gobject
open Gtk
open GtkBase
open GtkButton
open OgtkButtonProps
open GObj
open GContainer

(* Common skeleton for every button-like widget: clicking, default-widget
   handling and image removal. *)
class button_skel obj = object (self)
  inherit bin obj
  inherit button_props
  method clicked () = Button.clicked obj
  method grab_default () =
    set Widget.P.can_default obj true;
    set Widget.P.has_default obj true
  method event = new GObj.event_ops obj
  (* Clear the button's image property by setting it to a null object. *)
  method unset_image () =
    Gobject.Property.set_dyn obj
      GtkButtonProps.Button.P.image.Gobject.name (`OBJECT None)
end

class button_signals obj = object
  inherit container_signals_impl (obj : [> button] obj)
  inherit button_sigs
end

class button obj = object
  inherit button_skel (obj : Gtk.button obj)
  method connect = new button_signals obj
end

(* NOTE(review): this shadows the surrounding [pack_return], fixing
   ~packing/~show as labelled arguments — confirm against GObj/GContainer. *)
let pack_return create p ?packing ?show () =
  pack_return (create p) ~packing ~show

let button ?label =
  Button.make_params [] ?label ~cont:(
  pack_return (fun p -> new button (Button.create p)))

class toggle_button_signals obj = object (self)
  inherit button_signals obj
  method toggled = self#connect ToggleButton.S.toggled
end

class toggle_button obj = object
  inherit button_skel obj
  method connect = new toggle_button_signals obj
  method active = get ToggleButton.P.active obj
  method set_active = set ToggleButton.P.active obj
  method set_draw_indicator = set ToggleButton.P.draw_indicator obj
end

(* Shared builder for toggle and check buttons; [create] selects which
   underlying GtkToggleButton variant is instantiated. *)
let make_toggle_button create ?label =
  Button.make_params [] ?label ~cont:(
  ToggleButton.make_params ~cont:(
  pack_return (fun p -> new toggle_button (create p))))

let toggle_button = make_toggle_button ToggleButton.create
let check_button = make_toggle_button ToggleButton.create_check

class radio_button obj = object
  inherit toggle_button (obj : Gtk.radio_button obj)
  method set_group = set RadioButton.P.group obj
  (* A radio button is its own group handle. *)
  method group = Some obj
end

let radio_button ?group =
  Button.make_params [] ~cont:(
  ToggleButton.make_params ~cont:(
  pack_return (fun p -> new radio_button (RadioButton.create ?group p))))

class color_button_signals obj = object (self)
  inherit button_signals obj
  method color_set = self#connect ColorButton.S.color_set
end

class color_button obj = object
  inherit button_skel obj
  inherit color_button_props
  method connect = new color_button_signals obj
end

let color_button =
  ColorButton.make_params [] ~cont:(
  pack_return (fun pl -> new color_button (ColorButton.create pl)))

class font_button_signals obj = object (self)
  inherit button_signals obj
  method font_set = self#connect FontButton.S.font_set
end

class font_button obj = object
  inherit button_skel obj
  inherit font_button_props
  method connect = new font_button_signals obj
end

let font_button =
  FontButton.make_params [] ~cont:(
  pack_return (fun pl -> new font_button (FontButton.create pl)))

(* Toolbar *)

(* Anything that can be inserted into a toolbar. *)
class type tool_item_o = object
  method as_tool_item : Gtk.tool_item obj
end

class toolbar_signals obj = object
  inherit GContainer.container_signals_impl obj
  inherit toolbar_sigs
end

class toolbar obj = object
  inherit container (obj : Gtk.toolbar obj)
  method connect = new toolbar_signals obj
  method insert_widget ?tooltip ?tooltip_private ?pos w =
    Toolbar.insert_widget obj (as_widget w) ?tooltip ?tooltip_private ?pos
  method insert_button ?text ?tooltip ?tooltip_private ?icon ?pos
      ?callback () =
    let icon = may_map icon ~f:as_widget in
    new button
      (Toolbar.insert_button obj ~kind:`BUTTON
         ?icon ?text ?tooltip ?tooltip_private ?pos ?callback ())
  method insert_toggle_button ?text ?tooltip ?tooltip_private ?icon ?pos
      ?callback () =
    let icon = may_map icon ~f:as_widget in
    new toggle_button
      (ToggleButton.cast
         (Toolbar.insert_button obj ~kind:`TOGGLEBUTTON
            ?icon ?text ?tooltip ?tooltip_private ?pos ?callback ()))
  method insert_radio_button ?text ?tooltip ?tooltip_private ?icon ?pos
      ?callback () =
    let icon = may_map icon ~f:as_widget in
    new radio_button
      (RadioButton.cast
         (Toolbar.insert_button obj ~kind:`RADIOBUTTON
            ?icon ?text ?tooltip ?tooltip_private ?pos ?callback ()))
  method insert_space = Toolbar.insert_space obj
  method orientation = get Toolbar.P.orientation obj
  method set_orientation = set Toolbar.P.orientation obj
  method style = get Toolbar.P.toolbar_style obj
  method set_style = set Toolbar.P.toolbar_style obj
  method unset_style () = Toolbar.unset_style obj
  method get_tooltips = Toolbar.get_tooltips obj
  method set_tooltips = Toolbar.set_tooltips obj
  method icon_size = Toolbar.get_icon_size obj
  method set_icon_size = Toolbar.set_icon_size obj
  method unset_icon_size () = Toolbar.unset_icon_size obj
  (* extended API in GTK 2.4 *)
  method show_arrow = get Toolbar.P.show_arrow obj
  method set_show_arrow = set Toolbar.P.show_arrow obj
  (* pos = -1 appends at the end of the toolbar *)
  method insert : 'a. ?pos:int -> (#tool_item_o as 'a) -> unit =
    fun ?(pos= -1) i -> Toolbar.insert obj i#as_tool_item ~pos
  method get_item_index : 'a. (#tool_item_o as 'a) -> int =
    fun i -> Toolbar.get_item_index obj i#as_tool_item
  method get_n_items = Toolbar.get_n_items obj
  method get_nth_item = Toolbar.get_nth_item obj
  method get_drop_index = Toolbar.get_drop_index obj
  (* None clears the highlight; Some (item, pos) highlights a drop slot. *)
  method set_drop_highlight_item :
      'a. ((#tool_item_o as 'a) * int) option -> unit =
    function
    | None -> Toolbar.set_drop_highlight_item obj None 0
    | Some (i, pos) ->
        Toolbar.set_drop_highlight_item obj (Some i#as_tool_item) pos
  method relief_style = Toolbar.get_relief_style obj
end

let toolbar ?orientation ?style ?tooltips =
  pack_container [] ~create:(fun p ->
    let w = Toolbar.create p in
    Toolbar.set w ?orientation ?style ?tooltips;
    new toolbar w)

(* New extended API in GTK 2.4 *)

let may_cons = Gobject.Property.may_cons

(* Base class for GtkToolItem wrappers. *)
class tool_item_skel obj = object
  inherit [[> Gtk.tool_item]] GContainer.bin_impl obj
  inherit OgtkButtonProps.tool_item_props
  method as_tool_item = (obj :> Gtk.tool_item obj)
  method set_homogeneous = ToolItem.set_homogeneous obj
  method get_homogeneous = ToolItem.get_homogeneous obj
  method set_expand = ToolItem.set_expand obj
  method get_expand = ToolItem.get_expand obj
  method set_tooltip (t : GData.tooltips) =
    ToolItem.set_tooltip obj t#as_tooltips
  method set_use_drag_window = ToolItem.set_use_drag_window obj
  method get_use_drag_window = ToolItem.get_use_drag_window obj
end

class tool_item obj = object
  inherit tool_item_skel obj
  method connect = new GContainer.container_signals_impl obj
end

(* Shared builder: create the item, apply the optional attributes, pack it,
   and show it unless ~show:false was given. *)
let tool_item_params create pl ?homogeneous ?expand ?packing ?show () =
  let item = create pl in
  Gaux.may item#set_homogeneous homogeneous ;
  Gaux.may item#set_expand expand ;
  Gaux.may (fun f -> (f (item :> tool_item_o) : unit)) packing ;
  if show <> Some false then item#misc#show () ;
  item

let tool_item =
  tool_item_params (fun pl -> new tool_item (ToolItem.create pl)) []

class separator_tool_item obj = object
  inherit tool_item obj
  method draw = get SeparatorToolItem.P.draw obj
  method set_draw = set SeparatorToolItem.P.draw obj
end

let separator_tool_item ?draw =
  let pl = may_cons SeparatorToolItem.P.draw draw [] in
  tool_item_params
    (fun pl -> new separator_tool_item (SeparatorToolItem.create pl)) pl

class tool_button_signals (obj : [> Gtk.tool_button] obj) = object (self)
  inherit GContainer.container_signals_impl obj
  method clicked = self#connect ToolButton.S.clicked
end

class tool_button_skel obj = object
  inherit tool_item_skel obj
  inherit tool_button_props
end

class tool_button obj = object
  inherit tool_button_skel obj
  method connect = new tool_button_signals obj
end

let tool_button_params create pl ?label ?stock ?use_underline =
  tool_item_params create
    (may_cons ToolButton.P.label label (
     may_cons ToolButton.P.stock_id stock (
     may_cons ToolButton.P.use_underline use_underline pl)))

let tool_button =
  tool_button_params (fun pl -> new tool_button (ToolButton.create pl)) []

class toggle_tool_button_signals obj = object (self)
  inherit tool_button_signals obj
  method toggled = self#connect ToggleToolButton.S.toggled
end

class toggle_tool_button obj = object
  inherit tool_button_skel obj
  method connect = new toggle_tool_button_signals obj
  method set_active = ToggleToolButton.set_active obj
  method get_active = ToggleToolButton.get_active obj
end

let toggle_tool_button_params create pl ?active =
  tool_button_params
    (fun pl ->
      let o = create pl in
      Gaux.may o#set_active active ;
      o) pl

let toggle_tool_button =
  toggle_tool_button_params
    (fun pl -> new toggle_tool_button (ToggleToolButton.create pl)) []

class radio_tool_button obj = object
  inherit toggle_tool_button obj
  (* A radio tool button is its own group handle. *)
  method group = Some (obj :> Gtk.radio_tool_button Gtk.obj)
  method set_group = set RadioToolButton.P.group obj
end

let radio_tool_button ?group =
  toggle_tool_button_params
    (fun pl -> new radio_tool_button (RadioToolButton.create pl))
    (may_cons RadioToolButton.P.group
       (Gaux.may_map (fun g -> g#group) group) [])

class menu_tool_button obj = object
  inherit tool_button obj
  method menu = get MenuToolButton.P.menu obj
  method set_menu = set MenuToolButton.P.menu obj
  method set_arrow_tooltip (t : GData.tooltips) =
    MenuToolButton.set_arrow_tooltip obj t#as_tooltips
end

let menu_tool_button ?menu =
  tool_button_params
    (fun pl -> new menu_tool_button (MenuToolButton.create pl))
    (may_cons MenuToolButton.P.menu
       (Gaux.may_map (fun m -> m#as_menu) menu) [])

class link_button obj = object
  inherit button_skel obj
  inherit link_button_props
end

(* With no label, the URI itself is displayed. *)
let link_button ?label =
  pack_return (fun uri ->
    new link_button
      (match label with
       | None -> LinkButton.create uri
       | Some s -> LinkButton.create_with_label uri s))
(**************************************************************************) (* Lablgtk *) (* *) (* This program is free software; you can redistribute it *) (* and/or modify it under the terms of the GNU Library General *) (* Public License as published by the Free Software Foundation *) (* version 2, with the exception described in file COPYING which *) (* comes with the library. *) (* *) (* This program is distributed in the hope that it will be useful, *) (* but WITHOUT ANY WARRANTY; without even the implied warranty of *) (* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *) (* GNU Library General Public License for more details. *) (* *) (* You should have received a copy of the GNU Library General *) (* Public License along with this program; if not, write to the *) (* Free Software Foundation, Inc., 59 Temple Place, Suite 330, *) (* Boston, MA 02111-1307 USA *) (* *) (* *) (**************************************************************************)
gen_model.ml
open Location open Ident open Tools module A = Ast module M = Model exception Anomaly type error_desc = | CannotConvertToAssignOperator | CannotExtractBody | AnyNotAuthorizedInTransitionTo | NoRemoveAllOnCollection | RecordNotFound of ident [@@deriving show {with_path = false}] type error = Location.t * error_desc let pp_error_desc fmt = function | CannotConvertToAssignOperator -> Format.fprintf fmt "cannot convert to assign operator" | CannotExtractBody -> Format.fprintf fmt "cannot extract body" | AnyNotAuthorizedInTransitionTo -> Format.fprintf fmt "any not authorized in transition to" | NoRemoveAllOnCollection -> Format.fprintf fmt "remove all cannot be called for a collection of asset" | RecordNotFound id -> Format.fprintf fmt "record not found: %s" id let emit_error (lc, error : Location.t * error_desc) = let str : string = Format.asprintf "%a@." pp_error_desc error in let pos : Position.t list = [location_to_position lc] in Error.error_alert pos str (fun _ -> ()) let bailout = fun () -> raise (Error.Stop 5) let to_mident ((nm, id) : A.longident) : M.mident = M.mk_mident ~namespace:(Option.to_list nm) id let unloc_longident (lid : A.longident) : ident = unloc (snd lid) let longident_to_lident (lid : A.longident) : M.lident = snd lid let longident_to_mident (lid : A.longident) : M.mident = ((match fst lid with | Some v -> [v] | None -> []), snd lid) type env = { formula: bool; asset_name: ident option; function_p: (M.mident * (M.mident * M.type_ * M.mterm option) list) option } [@@deriving show {with_path = false}] let mk_env ?(formula=false) ?asset_name ?function_p () = { formula; asset_name; function_p } let to_model (ast : A.ast) : M.model = let to_currency = function | A.Utz -> M.Utz in let vtyp_to_btyp = function | A.VTunit -> M.Bunit | A.VTbool -> M.Bbool | A.VTnat -> M.Bnat | A.VTint -> M.Bint | A.VTrational -> M.Brational | A.VTdate -> M.Bdate | A.VTduration -> M.Bduration | A.VTstring -> M.Bstring | A.VTaddress -> M.Baddress | A.VTcurrency -> 
M.Bcurrency | A.VTsignature -> M.Bsignature | A.VTkey -> M.Bkey | A.VTkeyhash -> M.Bkeyhash | A.VTbytes -> M.Bbytes | A.VTchainid -> M.Bchainid | A.VTbls12_381_fr -> M.Bbls12_381_fr | A.VTbls12_381_g1 -> M.Bbls12_381_g1 | A.VTbls12_381_g2 -> M.Bbls12_381_g2 | A.VTnever -> M.Bnever | A.VTchest -> M.Bchest | A.VTchest_key -> M.Bchest_key | A.VTtx_rollup_l2_address -> M.Btx_rollup_l2_address in let to_trtyp = function | A.TRentry -> M.TRentry | A.TRaction -> M.TRaction | A.TRasset -> M.TRasset | A.TRfield -> M.TRfield in let rec type_to_type (t : A.type_) : M.type_ = let f = function | A.Tnamed _ -> assert false | A.Tasset id -> M.Tasset (to_mident id) | A.Trecord id -> M.Trecord (to_mident id) | A.Tenum id -> M.Tenum (to_mident id) | A.Tevent id -> M.Tevent (to_mident id) | A.Tbuiltin b -> M.Tbuiltin (vtyp_to_btyp b) | A.Tcontainer (t, Collection) -> M.Tcontainer (type_to_type t, Collection) | A.Tcontainer (t, Aggregate) -> M.Tcontainer (type_to_type t, Aggregate) | A.Tcontainer (t, Partition) -> M.Tcontainer (type_to_type t, Partition) | A.Tcontainer (t, AssetContainer) -> M.Tcontainer (type_to_type t, AssetContainer) | A.Tcontainer (t, AssetKey) -> M.Tcontainer (type_to_type t, AssetKey) | A.Tcontainer (t, AssetValue) -> M.Tcontainer (type_to_type t, AssetValue) | A.Tcontainer (t, AssetView) -> M.Tcontainer (type_to_type t, View) | A.Tset t -> M.Tset (type_to_type t) | A.Tlist t -> M.Tlist (type_to_type t) | A.Tmap (k, v) -> M.Tmap (type_to_type k, type_to_type v) | A.Tbig_map (k, v) -> M.Tbig_map (type_to_type k, type_to_type v) | A.Titerable_big_map (k, v) -> M.Titerable_big_map (type_to_type k, type_to_type v) | A.Tor (l, r) -> M.Tor (type_to_type l, type_to_type r) | A.Tlambda (a, r) -> M.Tlambda (type_to_type a, type_to_type r) | A.Ttuple l -> M.Ttuple (List.map type_to_type l) | A.Toperation -> M.Toperation | A.Tcontract t -> M.Tcontract (type_to_type t) | A.Toption t -> M.Toption (type_to_type t) | A.Tticket t -> M.Tticket (type_to_type t) | A.Ttrace tr -> 
M.Ttrace (to_trtyp tr) | A.Tsapling_state n -> M.Tsapling_state n | A.Tsapling_transaction n -> M.Tsapling_transaction n in M.mktype (f t) in let to_assignment_operator = function | A.ValueAssign -> M.ValueAssign | A.PlusAssign -> M.PlusAssign | A.MinusAssign -> M.MinusAssign | A.MultAssign -> M.MultAssign | A.DivAssign -> M.DivAssign | A.AndAssign -> M.AndAssign | A.OrAssign -> M.OrAssign in let to_pattern_node (n : A.pattern_node) : M.pattern_node = match n with | A.Mconst (id, xs) -> M.Pconst (M.mk_mident id, xs) | A.Mwild -> M.Pwild in let to_pattern (p : A.pattern) : M.pattern = let node = to_pattern_node p.node in M.mk_pattern node ~loc:p.loc in let to_comparison (op : A.comparison_operator) : M.comparison_operator = match op with | Gt -> Gt | Ge -> Ge | Lt -> Lt | Le -> Le | _ -> assert false in let term_arg_to_expr : 't. (A.pterm -> M.mterm) -> (A.pterm_arg) -> M.mterm = fun f a -> match a with | A.AExpr x -> f x | _ -> assert false (*| A.AEffect l -> M.AEffect (List.map (fun (id, op, term) -> (id, to_assignment_operator2 op, f term)) l) | A.AFun _ -> assert false (* TODO *)*) in let fail (ft : M.fail_type) : M.mterm = M.mk_mterm (Mfail ft) M.tunit in let term_not x : M.mterm = M.mk_mterm (M.Mnot x) M.tbool in (* let unit : M.mterm = M.mk_mterm (M.Mseq []) M.Tunit in *) (* let is_list (mt : mterm) = match mt with | {type_ = Tcontainer (a, _); _} -> unloc asset_name | _ -> Format.printf "extract_asset_name error: %a@\n" M.pp_type_ mterm.type_; assert false in *) let is_asset_container (v : A.pterm) : bool = match v with | {type_ = Some (Tcontainer (Tasset _, _)); _} -> true | _ -> false in let is_list (v : A.pterm) : bool = match v with | {type_ = Some (Tlist _); _} -> true | _ -> false in let extract_asset_name_from_type (t : M.type_) : ident = match M.get_ntype t with | M.Tcontainer ((Tasset asset_name, _), _) -> M.unloc_mident asset_name | _ -> assert false in let extract_asset_name (mterm : M.mterm) : ident = extract_asset_name_from_type mterm.type_ in 
let _extract_field_name (_id, _type_, body : A.lident * A.ptyp * A.pterm) : M.lident = match body.node with | A.Pdot (_, fn) -> fn | _ -> Format.printf "extract_field_name error: %a@\n" A.pp_pterm body; assert false in let extract_builtin_type_list (v : M.mterm) : M.type_ = match v with | {type_ = (Tlist t, _); _} -> t | _ -> assert false in let extract_builtin_type_set (v : M.mterm) : M.type_ = match v with | {type_ = (Tset t, _); _} -> t | _ -> assert false in let extract_builtin_type_map (v : M.mterm) : M.map_kind * M.type_ * M.type_ = match v with | {type_ = (Tmap (k, v), _); _} -> MKMap, k, v | {type_ = (Tbig_map (k, v), _); _} -> MKBigMap, k, v | {type_ = (Titerable_big_map (k, v), _); _} -> MKIterableBigMap, k, v | _ -> assert false in let to_entry_description (ad : A.entry_description) : M.entry_description = match ad with | ADAny -> M.ADany | ADOp ("add", id) -> M.ADadd (unloc id) | ADOp ("remove", id) -> M.ADremove (unloc id) | ADOp ("update", id) -> M.ADupdate (unloc id) | ADOp ("transfer", id) -> M.ADtransfer (unloc id) | ADOp ("get", id) -> M.ADget (unloc id) | ADOp ("iterate", id) -> M.ADiterate (unloc id) | ADOp ("call", id) -> M.ADcall (unloc id) | _ -> assert false in let to_ck (env : env) (fp : M.mterm) : M.container_kind = match fp.node, fp.type_ with | M.Mdotassetfield (an, _k, fn), (Tcontainer ((Tasset _, _), (Aggregate | Partition)), _) -> M.CKfield (M.unloc_mident an, M.unloc_mident fn, fp, Tnone, Dnone) | M.Mdot ({type_ = (Tasset an, _)}, fn), (Tcontainer ((Tasset _, _), (Aggregate | Partition)), _) -> M.CKfield (M.unloc_mident an, M.unloc_mident fn, fp, Tnone, Dnone) | M.Mvar (v, Vdefinition, _, _), _ -> M.CKdef (M.unloc_mident v) | M.Mvar (fn, _, t, d), (Tcontainer ((Tasset _, _), (Aggregate | Partition)), _) -> begin let an = match env.asset_name with | Some v -> v | None -> assert false in M.CKfield (an, M.unloc_mident fn, fp, t, d) end | M.Mvar (_, _, t, d), (Tcontainer ((Tasset _, _), Collection), _) -> M.CKcoll (t, d) | _, (Tcontainer 
((Tasset _, _), Collection), _) -> M.CKcoll (Tnone, Dnone) | _ -> M.CKview fp in let is_param env (id : M.mident) = match env.function_p with | Some (_, l) -> l |> List.map proj3_1 |> List.exists (fun x -> String.equal (M.unloc_mident id) (M.unloc_mident x)) | _ -> false in let get_kind_var env (id : M.mident) = if is_param env id then M.Vparam else M.Vlocal in let build_mvar ?(loc = Location.dummy) env (id : M.mident) t = M.mk_mterm (Mvar (id, get_kind_var env id, Tnone, Dnone)) t ~loc:loc in let rec to_mterm (env : env) (pterm : A.pterm) : M.mterm = let to_temp = function | A.VTbefore -> M.Tbefore | A.VTat lbl -> M.Tat lbl | A.VTnone -> M.Tnone in let to_delta = function | A.Vadded -> M.Dadded | A.Vremoved -> M.Dremoved | A.Vunmoved -> M.Dunmoved | A.Vnone -> M.Dnone in let is_record t = match M.get_ntype t with | M.Trecord _ | M.Tevent _ -> true | _ -> false in let type_ = type_to_type (Option.get pterm.type_) in let f x = to_mterm env x in let node = match pterm.node with | A.Pif (c, t, e) -> M.Mexprif (f c, f t, f e) | A.Pmatchwith (m, l) -> M.Mexprmatchwith (f m, List.map (fun (p, e) -> (to_pattern p, f e)) l) | A.Pmatchoption (x, id, ve, ne) -> M.Mmatchoption (f x, M.mk_mident id, f ve, f ne) | A.Pmatchor (x, lid, le, rid, re) -> M.Mmatchor (f x, M.mk_mident lid, f le, M.mk_mident rid, f re) | A.Pmatchlist (x, hid, tid, hte, ee) -> M.Mmatchlist (f x, M.mk_mident hid, M.mk_mident tid, f hte, f ee) | A.Pfold (x, i, e) -> M.Mfold (f x, M.mk_mident i, f e) | A.Pmap (x, i, e) -> M.Mmap (f x, M.mk_mident i, f e) | A.Plogical (A.And, l, r) -> M.Mand (f l, f r) | A.Plogical (A.Or, l, r) -> M.Mor (f l, f r) | A.Plogical (A.Xor, l, r) -> M.Mxor (f l, f r) | A.Plogical (A.Imply, l, r) -> M.Mimply (f l, f r) | A.Plogical (A.Equiv, l, r) -> M.Mequiv (f l, f r) | A.Pnot e -> M.Mnot (f e) | A.Pmulticomp (e, l) -> M.Mmulticomp (f e, List.map (fun (op, e) -> (to_comparison op, f e)) l) | A.Pcomp (A.Equal, l, r) -> let l = f l in M.Mequal (l.type_, l, f r) | A.Pcomp 
(A.Nequal, l, r) -> let l = f l in M.Mnequal (l.type_, l, f r) | A.Pcomp (A.Gt, l, r) -> M.Mgt (f l, f r) | A.Pcomp (A.Ge, l, r) -> M.Mge (f l, f r) | A.Pcomp (A.Lt, l, r) -> M.Mlt (f l, f r) | A.Pcomp (A.Le, l, r) -> M.Mle (f l, f r) | A.Parith (A.Plus, l, r) -> M.Mplus (f l, f r) | A.Parith (A.Minus, l, r) -> M.Mminus (f l, f r) | A.Parith (A.Mult, l, r) -> M.Mmult (f l, f r) | A.Parith (A.DivRat, l, r) -> M.Mdivrat (f l, f r) | A.Parith (A.DivEuc, l, r) -> M.Mdiveuc (f l, f r) | A.Parith (A.Modulo, l, r) -> M.Mmodulo (f l, f r) | A.Parith (A.DivMod, l, r) -> M.Mdivmod (f l, f r) | A.Puarith (A.Uminus, e) -> begin match f e with | {node = Mint n } -> M.Mint (Big_int.minus_big_int n) | v -> M.Muminus v end | A.Parith (A.ThreeWayCmp, l, r) -> M.MthreeWayCmp (f l, f r) | A.Parith (A.ShiftLeft, l, r) -> M.Mshiftleft (f l, f r) | A.Parith (A.ShiftRight, l, r) -> M.Mshiftright (f l, f r) | A.Precord l when is_record type_ -> begin let record_name = match M.get_ntype type_ with | M.Trecord name | M.Tevent name -> M.unloc_mident name | _ -> assert false in let ids, k = List.fold_left (fun accu (x : A.decl_) -> match x with | A.Drecord r when String.equal (unloc_longident r.name) record_name -> (List.map (fun (x : A.decl_gen) -> unloc x.name) r.fields, `Record) | A.Devent r when String.equal (unloc_longident r.name) record_name -> (List.map (fun (x : A.decl_gen) -> unloc x.name) r.fields, `Event) | _ -> accu) ([], `None) ast.decls in if List.length ids <> List.length l then (emit_error (pterm.loc, RecordNotFound record_name); bailout ()); match k with | `Record -> M.Mlitrecord (List.map2 (fun x y -> x, f y) ids l) | `Event -> M.Mlitevent (List.map2 (fun x y -> x, f y) ids l) | `None -> assert false end | A.Precord l -> M.Masset (List.map f l) | A.Precupdate (e, l) -> M.Mrecupdate (f e, List.map (fun (id, v) -> unloc id, f v) l) | A.Pletin (id, init, typ, body, o) -> M.Mletin ([M.mk_mident id], LVsimple (f init), Option.map type_to_type typ, f body, Option.map f o) | 
A.Pdeclvar (i, t, v, c) -> M.Mdeclvar ([M.mk_mident i], Option.map type_to_type t, f v, c) (* enum value *) | A.Pvar (_b, _vs, id) when A.Utils.is_enum_value ast (longident_to_lident id) -> M.Menumval (to_mident id, [], A.Utils.get_enum_values ast (longident_to_lident id) |> Option.get |> unloc) | A.Pcall (_, Cid id, [], args) when A.Utils.is_enum_value ast id -> M.Menumval (M.mk_mident id, List.map (function | A.AExpr x -> f x | _ -> assert false) args, A.Utils.get_enum_values ast id |> Option.get |> unloc) | A.Pvar (b, vs, (_, { pldesc = "state" })) -> M.Mvar (M.mk_mident (dumloc ""), Vstate, to_temp b, to_delta vs) | A.Pvar (b, vs, id) when is_param env (to_mident id) -> M.Mvar (to_mident id, Vparam, to_temp b, to_delta vs) | A.Pvar (b, vs, id) when A.Utils.is_variable ast (longident_to_lident id) -> M.Mvar (to_mident id, Vstorevar, to_temp b, to_delta vs) | A.Pvar (b, vs, id) when A.Utils.is_asset ast (longident_to_lident id) -> M.Mvar (to_mident id, Vstorecol, to_temp b, to_delta vs) | A.Pvar (b, vs, id) when A.Utils.is_definition ast (longident_to_lident id) -> M.Mvar (to_mident id, Vdefinition, to_temp b, to_delta vs) | A.Pvar (b, vs, id) when A.Utils.is_parameter ast (longident_to_lident id) -> M.Mvar (to_mident id, Vparameter, to_temp b, to_delta vs) | A.Pvar (b, vs, id) -> M.Mvar (to_mident id, Vlocal, to_temp b, to_delta vs) | A.Parray l -> begin let l = List.map f l in match M.get_ntype type_ with | Tcontainer ((Tasset _, _), _) -> M.Massets l | Tset _ -> M.Mlitset l | Tmap ( _, _) -> M.Mlitmap (MKMap, List.map (fun (x : M.mterm) -> match x.node with | M.Mtuple [k; v] -> (k, v) | _ -> assert false) l) | Tbig_map ( _, _) -> M.Mlitmap (MKBigMap, List.map (fun (x : M.mterm) -> match x.node with | M.Mtuple [k; v] -> (k, v) | _ -> assert false) l) | Titerable_big_map (_, _) -> M.Mlitmap (MKIterableBigMap, List.map (fun (x : M.mterm) -> match x.node with | M.Mtuple [k; v] -> (k, v) | _ -> assert false) l) | _ -> M.Mlitlist l end | A.Plit ({node = BVint i; _}) 
-> M.Mint i | A.Plit ({node = BVnat i; _}) -> M.Mnat i | A.Plit ({node = BVbool b; _}) -> M.Mbool b | A.Plit ({node = BVrational (d, n); _}) -> M.Mrational (d, n) | A.Plit ({node = BVdate s; _}) -> M.Mdate s | A.Plit ({node = BVstring s; _}) -> M.Mstring s | A.Plit ({node = BVcurrency (c, i); _}) -> M.Mcurrency (i, to_currency c) | A.Plit ({node = BVaddress s; _}) -> M.Maddress s | A.Plit ({node = BVtx_rollup_l2_address s; _}) -> M.Mtx_rollup_l2_address s | A.Plit ({node = BVduration d; _}) -> M.Mduration d | A.Plit ({node = BVbytes v; _}) -> M.Mbytes v | A.Plit ({node = BVunit; _}) -> M.Munit | A.Pdot (e, id) -> begin match e with | {node = Pcall (Some a, Cconst Cget, [], [AExpr k])} -> begin let b = f a in match M.get_ntype b.type_ with | M.Tcontainer ((Tasset an, _), Collection) -> M.Mdotassetfield (an, f k, M.mk_mident id) | _ -> M.Mdot (f e, M.mk_mident id) end | _ -> (* handle dot contract too *) M.Mdot (f e, M.mk_mident id) end (* | A.Pquestion (e, id, edv) -> assert false *) | A.Pquestiondot (e, id) -> M.Mquestionoption (f e, M.mk_mident id) | A.Pconst Cstate -> M.Mvar(M.mk_mident (dumloc ""), Vstate, Tnone, Dnone) | A.Pconst Cnow -> M.Mnow | A.Pconst Ctransferred -> M.Mtransferred | A.Pconst Ccaller -> M.Mcaller | A.Pconst Cbalance -> M.Mbalance | A.Pconst Csource -> M.Msource | A.Pconst Cselfaddress -> M.Mselfaddress | A.Pconst Cselfchainid -> M.Mselfchainid | A.Pconst Coperations -> M.Moperations | A.Pconst Cmetadata -> M.Mmetadata | A.Pconst Ctotalvotingpower -> M.Mtotalvotingpower | A.Pconst Clevel -> M.Mlevel | A.Pconst Cminblocktime -> M.Mminblocktime | A.Pconst c -> Format.eprintf "expr const unkown: %a@." 
A.pp_const c; assert false | A.Ptuple l -> M.Mtuple (List.map f l) | A.Ptupleaccess (p, idx) -> M.Mtupleaccess (f p, idx) | A.Pnone -> M.Mnone | A.Psome a -> M.Msome (f a) | A.Pleft (t, x) -> M.Mleft (type_to_type t, f x) | A.Pright (t, x) -> M.Mright (type_to_type t, f x) | A.Plambda (rt, id, at, e) -> M.Mlambda (type_to_type rt, M.mk_mident id, type_to_type at, f e) | A.Pcast (src, dst, v) -> begin let v = f v in match src, dst, v with | A.Tbuiltin VTnat, A.Tbuiltin VTint, { node = Mnat v; _} -> M.Mint v | A.Tbuiltin VTnat, A.Tbuiltin VTint, _ -> M.Mnattoint v | A.Tbuiltin VTnat, A.Tbuiltin VTrational, _ -> M.Mnattorat v | A.Tbuiltin VTint, A.Tbuiltin VTrational, _ -> M.Minttorat v | A.Tbuiltin VTbls12_381_fr, A.Tbuiltin VTint, _ -> M.Mnattoint v (* | A.Tbuiltin VTbls12_381_fr, A.Tbuiltin VTint, _ -> M.Mnattoint v *) | A.Tbuiltin VTstring, A.Tbuiltin VTchainid, { node = Mstring v; _} -> M.Mchain_id v | A.Tbuiltin VTstring, A.Tbuiltin VTkey, { node = Mstring v; _} -> M.Mkey v | A.Tbuiltin VTstring, A.Tbuiltin VTkeyhash, { node = Mstring v; _} -> M.Mkey_hash v | A.Tbuiltin VTstring, A.Tbuiltin VTsignature, { node = Mstring v; _} -> M.Msignature v | A.Tbuiltin VTbytes, A.Tbuiltin VTbls12_381_fr, { node = Mbytes v; _} -> M.Mbls12_381_fr v | A.Tbuiltin VTnat, A.Tbuiltin VTbls12_381_fr, { node = Mnat v; _} -> M.Mbls12_381_fr_n v | A.Tbuiltin VTint, A.Tbuiltin VTbls12_381_fr, { node = Mint v; _} -> M.Mbls12_381_fr_n v | A.Tbuiltin VTbytes, A.Tbuiltin VTbls12_381_g1, { node = Mbytes v; _} -> M.Mbls12_381_g1 v | A.Tbuiltin VTbytes, A.Tbuiltin VTbls12_381_g2, { node = Mbytes v; _} -> M.Mbls12_381_g2 v | A.Tbuiltin VTbytes, A.Tsapling_transaction n, { node = Mbytes v; _} -> M.MsaplingTransaction (n, v) | A.Tbuiltin VTbytes, A.Tbuiltin VTchest, { node = Mbytes v; _} -> M.Mchest v | A.Tbuiltin VTbytes, A.Tbuiltin VTchest_key, { node = Mbytes v; _} -> M.Mchest_key v | _ -> M.Mcast (type_to_type src, type_to_type dst, v) end | A.Pquantifer (Forall, i, (coll, typ), term) -> 
M.Mforall (M.mk_mident i, type_to_type typ, Option.map f coll, f term) | A.Pquantifer (Exists, i, (coll, typ), term) -> M.Mexists (M.mk_mident i, type_to_type typ, Option.map f coll, f term) | A.Pself id -> M.Mself (M.mk_mident id) | A.Pternary (c, a, b) -> begin let c = f c in let a = f a in let b = f b in match c with | {type_ = (M.Tbuiltin Bbool, _)} -> M.Mternarybool (c, a, b) | {type_ = (M.Toption _, _)} -> M.Mternaryoption (c, a, b) | _ -> assert false end | A.Pcreatecontract (ms, okh, amount, arg_storage) -> let to_michelson_struct (ms : A.michelson_struct) = M.{ ms_content = ms.ms_content } in M.Mcreatecontract (to_michelson_struct ms, f okh, f amount, f arg_storage) (* | A.Pcall (Some p, A.Cconst A.Cbefore, []) -> M.Msetbefore (f p) *) (* | A.Pcall (Some p, A.Cconst A.Cunmoved, []) -> M.Msetunmoved (f p) | A.Pcall (Some p, A.Cconst A.Cadded, []) -> M.Msetadded (f p) | A.Pcall (Some p, A.Cconst A.Cremoved, []) -> M.Msetremoved (f p) *) (* Asset *) | A.Pcall (Some p, A.Cconst (A.Cget), [], [AExpr q]) when is_asset_container p -> let fp = f p in let fq = f q in let asset_name = extract_asset_name fp in M.Mget (asset_name, to_ck env fp, fq) | A.Pcall (Some p, A.Cconst (A.Cgetopt), [], [AExpr q]) when is_asset_container p -> let fp = f p in let fq = f q in let asset_name = extract_asset_name fp in M.Mgetsome (asset_name, to_ck env fp, fq) | A.Pcall (Some p, A.Cconst (A.Cselect), [], [AFun (_id, _type, l, q)]) when is_asset_container p -> let fp = f p in let lambda_body = f q in let asset_name = extract_asset_name fp in let lambda_args, args = List.fold_right (fun (x, y, z) (l1, l2) -> ((unloc x, type_to_type y)::l1, (f z)::l2)) l ([], []) in M.Mselect (asset_name, to_ck env fp, lambda_args, lambda_body, args) | A.Pcall (Some p, A.Cconst (A.Csort), [], args) when is_asset_container p -> let fp = f p in let asset_name = extract_asset_name fp in let args = List.map (fun x -> match x with | A.ASorting (asc, field_name) -> begin let sort_kind = match asc with | true 
-> M.SKasc | false -> M.SKdesc in unloc field_name, sort_kind end | _ -> assert false) args in M.Msort (asset_name, to_ck env fp, args) | A.Pcall (Some p, A.Cconst (A.Ccontains), [], [AExpr q]) when is_asset_container p -> let fp = f p in let fq = f q in let asset_name = extract_asset_name fp in M.Mcontains (asset_name, to_ck env fp, fq) | A.Pcall (Some p, A.Cconst (A.Cnth), [], [AExpr q]) when is_asset_container p -> let fp = f p in let fq = f q in let asset_name = extract_asset_name fp in M.Mnth (asset_name, to_ck env fp, fq) | A.Pcall (Some p, A.Cconst (A.Ccount), [], []) when is_asset_container p -> let fp = f p in let asset_name = extract_asset_name fp in M.Mcount (asset_name, to_ck env fp) | A.Pcall (Some p, A.Cconst (A.Csum), [], [AFun (_qi, _qt, _l, q)]) when is_asset_container p -> let fp = f p in let fq = f q in let asset_name = extract_asset_name fp in M.Msum (asset_name, to_ck env fp, fq) | A.Pcall (Some p, A.Cconst (A.Chead), [], [AExpr e]) when is_asset_container p -> let fp = f p in let fe = f e in let asset_name = extract_asset_name fp in M.Mhead (asset_name, to_ck env fp, fe) | A.Pcall (Some p, A.Cconst (A.Ctail), [], [AExpr e]) when is_asset_container p -> let fp = f p in let fe = f e in let asset_name = extract_asset_name fp in M.Mtail (asset_name, to_ck env fp, fe) | A.Pcall (None, A.Cconst (A.CmakeAsset), [A.Tasset an], [AExpr k; AExpr v]) -> (* let vt = ft t in *) let fk = f k in let fv = f v in M.Mmakeasset (unloc (longident_to_lident an), fk, fv) | A.Pcall (Some p, A.Cconst (A.CtoContainer), [], []) -> let fp = f p in let asset_name = extract_asset_name fp in M.Mtocontainer asset_name (* Set*) | A.Pcall (None, A.Cconst (A.Csadd), [], [AExpr p; AExpr q]) -> let fp = f p in let fq = f q in let t = extract_builtin_type_set fp in M.Msetadd (t, fp, fq) | A.Pcall (None, A.Cconst (A.Csremove), [], [AExpr p; AExpr q]) -> let fp = f p in let fq = f q in let t = extract_builtin_type_set fp in M.Msetremove (t, fp, fq) | A.Pcall (None, A.Cconst 
(A.Csupdate), [], [AExpr p; AExpr q; AExpr r]) -> let fp = f p in let fq = f q in let fr = f r in let t = extract_builtin_type_set fp in M.Msetupdate (t, fp, fq, fr) | A.Pcall (None, A.Cconst (A.Cscontains), [], [AExpr p; AExpr q]) -> let fp = f p in let fq = f q in let t = extract_builtin_type_set fp in M.Msetcontains (t, fp, fq) | A.Pcall (None, A.Cconst (A.Cslength), [], [AExpr p]) -> let fp = f p in let t = extract_builtin_type_set fp in M.Msetlength (t, fp) (* List*) | A.Pcall (None, A.Cconst (A.Cprepend), [], [AExpr p; AExpr q]) when is_list p -> ( let fp = f p in let fq = f q in let t = extract_builtin_type_list fp in M.Mlistprepend (t, fp, fq) ) | A.Pcall (None, A.Cconst (A.Clength), [], [AExpr p]) when is_list p -> let fp = f p in let t = extract_builtin_type_list fp in M.Mlistlength (t, fp) | A.Pcall (None, A.Cconst (A.Ccontains), [], [AExpr p; AExpr q]) when is_list p -> let fp = f p in let fq = f q in let t = extract_builtin_type_list fp in M.Mlistcontains (t, fp, fq) | A.Pcall (None, A.Cconst (A.Cnth), [], [AExpr p; AExpr q]) when is_list p -> let fp = f p in let fq = f q in let t = extract_builtin_type_list fp in M.Mlistnth (t, fp, fq) | A.Pcall (None, A.Cconst (A.Creverse), [], [AExpr p]) when is_list p -> let fp = f p in let t = extract_builtin_type_list fp in M.Mlistreverse (t, fp) | A.Pcall (None, A.Cconst (A.Cconcat), [], [AExpr p; AExpr q]) when is_list p -> let fp = f p in let fq = f q in let t = extract_builtin_type_list fp in M.Mlistconcat (t, fp, fq) | A.Pcall (None, A.Cconst (A.Chead), [], [AExpr p; AExpr q]) when is_list p -> ( let fp = f p in let fq = f q in let t = extract_builtin_type_list fp in M.Mlisthead (t, fp, fq) ) | A.Pcall (None, A.Cconst (A.Ctail), [], [AExpr p; AExpr q]) when is_list p -> ( let fp = f p in let fq = f q in let t = extract_builtin_type_list fp in M.Mlisttail (t, fp, fq) ) (* Map *) | A.Pcall (None, A.Cconst (A.Cmput), [], [AExpr p; AExpr q; AExpr r]) -> let fp = f p in let fq = f q in let fr = f r in let mk, kt, 
vt = extract_builtin_type_map fp in M.Mmapput (mk, kt, vt, fp, fq, fr) | A.Pcall (None, A.Cconst (A.Cmremove), [], [AExpr p; AExpr q]) -> let fp = f p in let fq = f q in let mk, kt, vt = extract_builtin_type_map fp in M.Mmapremove (mk, kt, vt, fp, fq) | A.Pcall (None, A.Cconst (A.Cmupdate), [], [AExpr p; AExpr q; AExpr r]) -> let fp = f p in let fq = f q in let fr = f r in let mk, kt, vt = extract_builtin_type_map fp in M.Mmapupdate (mk, kt, vt, fp, fq, fr) | A.Pcall (None, A.Cconst (A.Cmget), [], [AExpr p; AExpr q]) -> let fp = f p in let fq = f q in let mk, kt, vt = extract_builtin_type_map fp in M.Mmapget (mk, kt, vt, fp, fq, None) | A.Pcall (None, A.Cconst (A.Cmgetopt), [], [AExpr p; AExpr q]) -> let fp = f p in let fq = f q in let mk, kt, vt = extract_builtin_type_map fp in M.Mmapgetopt (mk, kt, vt, fp, fq) | A.Pcall (None, A.Cconst (A.Cmcontains), [], [AExpr p; AExpr q]) -> let fp = f p in let fq = f q in let mk, kt, vt = extract_builtin_type_map fp in M.Mmapcontains (mk, kt, vt, fp, fq) | A.Pcall (None, A.Cconst (A.Cmlength), [], [AExpr p]) -> let fp = f p in let mk, kt, vt = extract_builtin_type_map fp in M.Mmaplength (mk, kt, vt, fp) (* Formula *) | A.Pcall (Some p, A.Cconst (A.Cempty), [], []) when is_asset_container p -> let fp = f p in let asset_name = extract_asset_name fp in M.Mempty (asset_name) | A.Pcall (Some p, A.Cconst (A.Csingleton), [], [AExpr q]) when is_asset_container p -> let fp = f p in let fq = f q in let asset_name = extract_asset_name fp in M.Msingleton (asset_name, fq) | A.Pcall (Some p, A.Cconst (A.Csubsetof), [], [AExpr q]) when is_asset_container p -> let fp = f p in let fq = f q in let asset_name = extract_asset_name fp in M.Msubsetof (asset_name, to_ck env fp, fq) | A.Pcall (Some p, A.Cconst (A.Cisempty), [], []) when is_asset_container p -> let fp = f p in let asset_name = extract_asset_name fp in M.Misempty (asset_name, fp) | A.Pcall (Some p, A.Cconst (A.Cunion), [], [AExpr q]) when is_asset_container p -> let fp = f p in let fq 
= f q in let asset_name = extract_asset_name fp in M.Munion (asset_name, fp, fq) | A.Pcall (Some p, A.Cconst (A.Cinter), [], [AExpr q]) when is_asset_container p -> let fp = f p in let fq = f q in let asset_name = extract_asset_name fp in M.Minter (asset_name, fp, fq) | A.Pcall (Some p, A.Cconst (A.Cdiff), [], [AExpr q]) when is_asset_container p -> let fp = f p in let fq = f q in let asset_name = extract_asset_name fp in M.Mdiff (asset_name, fp, fq) (* Builtin functions*) | A.Pcall (None, A.Cconst A.Cmin, [], [AExpr a; AExpr b]) -> let fa = f a in let fb = f b in M.Mmin (fa, fb) | A.Pcall (None, A.Cconst A.Cmax, [], [AExpr a; AExpr b]) -> let fa = f a in let fb = f b in M.Mmax (fa, fb) | A.Pcall (None, A.Cconst A.Cabs, [], [AExpr a]) -> let fa = f a in M.Mabs (fa) | A.Pcall (None, A.Cconst A.Cconcat, [], [AExpr x; AExpr y]) -> let fx = f x in let fy = f y in M.Mconcat (fx, fy) | A.Pcall (None, A.Cconst A.Cconcat, [], [AExpr x]) -> let fx = f x in M.Mconcatlist (fx) | A.Pcall (None, A.Cconst A.Cslice, [], [AExpr x; AExpr s; AExpr e]) -> let fx = f x in let fs = f s in let fe = f e in M.Mslice (fx, fs, fe) | A.Pcall (None, A.Cconst A.Clength, [], [AExpr x]) -> let fx = f x in M.Mlength (fx) | A.Pcall (None, A.Cconst A.Cisnone, [], [AExpr x]) -> let fx = f x in M.Misnone (fx) | A.Pcall (None, A.Cconst A.Cissome, [], [AExpr x]) -> let fx = f x in M.Missome (fx) | A.Pcall (None, A.Cconst A.Cinttonat, [], [AExpr x]) -> let fx = f x in M.Minttonat (fx) | A.Pcall (None, A.Cconst A.Cfloor, [], [AExpr x]) -> let fx = f x in M.Mfloor (fx) | A.Pcall (None, A.Cconst A.Cceil, [], [AExpr x]) -> let fx = f x in M.Mceil (fx) | A.Pcall (None, A.Cconst A.Cnattostring, [], [AExpr x]) -> let fx = f x in M.Mnattostring fx | A.Pcall (None, A.Cconst A.Cbytestonat, [], [AExpr x]) -> let fx = f x in M.Mbytestonat fx | A.Pcall (None, A.Cconst A.Cnattobytes, [], [AExpr x]) -> let fx = f x in M.Mnattobytes fx | A.Pcall (None, A.Cconst A.Cbytestoint, [], [AExpr x]) -> let fx = f x in 
M.Mbytestoint fx | A.Pcall (None, A.Cconst A.Cinttobytes, [], [AExpr x]) -> let fx = f x in M.Minttobytes fx | A.Pcall (None, A.Cconst A.Cpack, [], [AExpr x]) -> let fx = f x in M.Mpack (fx) | A.Pcall (None, A.Cconst A.Cunpack, [], [AExpr x]) -> let fx = f x in let t = match M.get_ntype type_ with | Toption t -> t | _ -> assert false in M.Munpack (t, fx) | A.Pcall (None, A.Cconst A.Csetdelegate, [], [AExpr x]) -> let fx = f x in M.Msetdelegate (fx) | A.Pcall (None, A.Cconst A.Ckeyhashtocontract, [], [AExpr x]) -> let fx = f x in M.Mkeyhashtocontract (fx) | A.Pcall (None, A.Cconst A.Csubnat, [], [AExpr x; AExpr y]) -> let fx = f x in let fy = f y in M.Msubnat (fx, fy) | A.Pcall (None, A.Cconst A.Csubmutez, [], [AExpr x; AExpr y]) -> let fx = f x in let fy = f y in M.Msubmutez (fx, fy) | A.Pcall (None, A.Cconst A.Cblake2b, [], [AExpr x]) -> let fx = f x in M.Mblake2b (fx) | A.Pcall (None, A.Cconst A.Csha256, [], [AExpr x]) -> let fx = f x in M.Msha256 (fx) | A.Pcall (None, A.Cconst A.Csha512, [], [AExpr x]) -> let fx = f x in M.Msha512 (fx) | A.Pcall (None, A.Cconst A.Csha3, [], [AExpr x]) -> let fx = f x in M.Msha3 (fx) | A.Pcall (None, A.Cconst A.Ckeccak, [], [AExpr x]) -> let fx = f x in M.Mkeccak (fx) | A.Pcall (None, A.Cconst A.Ckeytokeyhash, [], [AExpr x]) -> let fx = f x in M.Mkeytokeyhash (fx) | A.Pcall (None, A.Cconst A.Csimplifyrational, [], [AExpr x]) -> let fx = f x in M.Msimplify_rational (fx) | A.Pcall (None, A.Cconst A.Cgetnumerator, [], [AExpr x]) -> let fx = f x in M.Mget_numerator (fx) | A.Pcall (None, A.Cconst A.Cgetdenominator, [], [AExpr x]) -> let fx = f x in M.Mget_denominator (fx) | A.Pcall (None, A.Cconst A.Cchecksignature, [], [AExpr k; AExpr s; AExpr x]) -> let fk = f k in let fs = f s in let fx = f x in M.Mchecksignature (fk, fs, fx) | A.Pcall (None, A.Cconst A.Ccontracttoaddress, [], [AExpr x]) -> let fx = f x in M.Mcontracttoaddress (fx) | A.Pcall (None, A.Cconst A.Caddresstocontract, [t], [AExpr x]) -> let fx = f x in 
M.Maddresstocontract (type_to_type t, fx) | A.Pcall (None, A.Cconst A.Ckeytoaddress, [], [AExpr x]) -> let fx = f x in M.Mkeytoaddress (fx) | A.Pcall (_, A.Cid id, _, args) -> M.Mapp (M.mk_mident id, List.map (fun x -> term_arg_to_expr f x) args) | A.Pcall (None, A.Cconst A.Cgreedyand, [], [AExpr a; AExpr b]) -> let fa = f a in let fb = f b in M.Mgreedyand (fa, fb) | A.Pcall (None, A.Cconst A.Cgreedyor, [], [AExpr a; AExpr b]) -> let fa = f a in let fb = f b in M.Mgreedyor (fa, fb) (* | A.Pcall (None, A.Cconst (A.Cmaybeperformedonlybyrole), [AExpr l; AExpr r]) -> M.MsecMayBePerformedOnlyByRole (f l, f r) | A.Pcall (None, A.Cconst (A.Cmaybeperformedonlybyaction), [AExpr l; AExpr r]) -> M.MsecMayBePerformedOnlyByEntry (f l, f r) | A.Pcall (None, A.Cconst (A.Cmaybeperformedbyrole), [AExpr l; AExpr r]) -> M.MsecMayBePerformedByRole (f l, f r) | A.Pcall (None, A.Cconst (A.Cmaybeperformedbyaction), [AExpr l; AExpr r]) -> M.MsecMayBePerformedByEntry (f l, f r) *) (* Voting *) | A.Pcall (None, A.Cconst A.Cvotingpower, [], [AExpr x]) -> let fx = f x in M.Mvotingpower (fx) (* Ticket *) | A.Pcall (None, A.Cconst A.Ccreateticket, [], [AExpr a; AExpr b]) -> let fa = f a in let fb = f b in M.Mcreateticket (fa, fb) | A.Pcall (None, A.Cconst A.Creadticket, [], [AExpr x]) -> let fx = f x in M.Mreadticket fx | A.Pcall (None, A.Cconst A.Csplitticket, [], [AExpr a; AExpr b; AExpr c]) -> let fa = f a in let fb = f b in let fc = f c in M.Msplitticket (fa, fb, fc) | A.Pcall (None, A.Cconst A.Cjointickets, [], [AExpr a; AExpr b]) -> let fa = f a in let fb = f b in M.Mjointickets (fa, fb) (* Sapling *) | A.Pcall (None, A.Cconst A.Csapling_empty_state, [], [AExpr x]) -> begin let fx = f x in match fx.node with | M.Mnat n -> M.Msapling_empty_state (Big_int.int_of_big_int n) | _ -> assert false end | A.Pcall (None, A.Cconst A.Csapling_verify_update, [], [AExpr x; AExpr y]) -> let fx = f x in let fy = f y in M.Msapling_verify_update (fx, fy) (* Bls curve *) | A.Pcall (None, A.Cconst 
A.Cpairing_check, [], [AExpr x]) -> let fx = f x in M.Mpairing_check fx (* Timelock *) | A.Pcall (None, A.Cconst A.Copen_chest, [], [AExpr x; AExpr y; AExpr z]) -> let fx = f x in let fy = f y in let fz = f z in M.Mopen_chest(fx, fy, fz) (* Operation *) | A.Pcall (None, A.Cconst (A.Cmakeoperation), [], [AExpr a; AExpr b; AExpr c]) -> let fa = f a in let fb = f b in let fc = f c in M.Mmakeoperation (fa, fb, fc) | A.Pcall (None, A.Cconst (A.Cmakeevent), [ty], [AIdent id; AExpr a]) -> M.Mmakeevent (type_to_type ty, M.mk_mident id, f a) (* Lambda *) | A.Pcall (None, A.Cconst (A.Cexec), [], [AExpr a; AExpr b]) -> let fa = f a in let fb = f b in M.Mexeclambda (fa, fb) | A.Pcall (None, A.Cconst (A.Capply), [], [AExpr a; AExpr b]) -> let fa = f a in let fb = f b in M.Mapplylambda (fa, fb) (* Other *) | A.Pcall (None, A.Cconst A.Cinttodate, [], [AExpr x]) -> let fx = f x in M.Minttodate (fx) | A.Pcall (None, A.Cconst A.CmutezToNat, [], [AExpr x]) -> let fx = f x in M.Mmuteztonat (fx) | A.Pcall (None, A.Cconst A.CgetEntrypoint, [t], [AIdent id; AExpr arg]) -> let arg = f arg in let t = type_to_type t in M.Mgetentrypoint (t, M.mk_mident id, arg) | A.Pcall (None, A.Cconst A.CrequireEntrypoint, [t], [AIdent id; AExpr arg; AExpr err]) -> let arg = f arg in let err = f err in let t = type_to_type t in let ma = M.mk_mterm (M.Mgetentrypoint (t, M.mk_mident id, arg)) (M.toption (M.tcontract t)) in let idv = dumloc "_v" in let s = M.mk_mvar (M.mk_mident idv) (M.tcontract t) in M.Mmatchoption(ma, M.mk_mident idv, s, M.failg err) | A.Pcall (None, A.Cconst A.CcallView, [t], [AIdent id; AExpr addr; AExpr arg]) -> let addr = f addr in let arg = f arg in let t = type_to_type t in M.Mcallview (t, addr, M.mk_mident id, arg) | A.Pcall (None, A.Cconst A.CimportCallView, [t], [AIdent id; AExpr addr; AExpr arg]) -> let addr = f addr in let arg = f arg in let t = type_to_type t in M.Mimportcallview (t, addr, M.mk_mident id, arg) (* Fail *) | A.Pcall (aux, A.Cconst c, types, args) -> 
Format.eprintf "expr const unkown: %a with types: [%a] nb args: %d [%a] %s@." A.pp_const c (Printer_tools.pp_list ", " Printer_ast.pp_ptyp) types (List.length args) (Printer_tools.pp_list "; " (fun fmt x -> let str = match x with | A.AExpr _ -> "AExpr" | A.AEffect _ -> "AEffect" | A.AFun _ -> "AFun" | A.ASorting _ -> "ASorting" | AIdent _ -> "AIdent" in Printer_tools.pp_str fmt str)) args (match aux with | Some _ -> "with aux" | _ -> "without aux"); assert false in M.mk_mterm node type_ ~loc:pterm.loc in let to_label_lterm (env : env) (x : A.label_term) : M.label_term = M.mk_label_term (to_mterm { env with formula = true } x.term) (M.mk_mident (Option.get x.label)) ~loc:x.loc in let extract_asset_name (pterm : M.mterm) : Ident.ident = match pterm with | {type_ = (Tcontainer ((Tasset asset_name, _), _), _); _ } -> M.unloc_mident asset_name | _ -> assert false in let to_variable_kind = function | A.VKconstant -> M.VKconstant | A.VKvariable -> M.VKvariable in let process_var (env : env) (v : A.variable) : M.decl_node = let t : M.type_ = type_to_type (Option.get v.decl.typ) in let invariants = List.map (fun x -> to_label_lterm env x) v.invs in let var : M.var = M.mk_var (M.mk_mident v.decl.name) t t (to_variable_kind v.kind) ?default:(Option.map (to_mterm env) v.decl.default) ~invariants:invariants ~loc:v.loc in M.Dvar var in let process_enum (env : env) (e : A.enum) : M.decl_node = let values = List.map (fun (x : A.enum_item_struct) -> let id : M.lident = x.name in M.mk_enum_item (M.mk_mident id) ~args:(List.map type_to_type x.args) ~invariants:(List.map (fun x -> to_label_lterm env x) x.invariants) ) e.items in let initial : A.lident option = List.fold_left (fun accu (x : A.enum_item_struct) -> match x.initial with | true -> Some x.name | _ -> accu) None e.items in (* let initial = (match initial with | Some x -> x | _ -> emit_error (NoInitialValueFor (unloc e.name))) in *) let enum = M.mk_enum (M.mk_mident (A.Utils.get_enum_name e)) (M.mk_mident (Option.get 
initial)) ~values:values in M.Denum enum in let process_asset (env : env) (a : A.asset) : M.decl_node = let env = {env with asset_name = Some (unloc_longident a.name)} in let values = List.map (fun (x : A.decl_gen) -> let typ = Option.get (Option.map type_to_type x.typ) in let default = Option.map (to_mterm env) x.default in M.mk_asset_item (M.mk_mident x.name) typ typ ?default:default ~shadow:x.shadow ~loc:x.loc) a.fields in let mk_asset an l : M.mterm = let l = List.map (to_mterm env) l in M.mk_mterm (M.Masset l) (M.tasset an) ~loc:(Location.mergeall (List.map (fun (x : M.mterm) -> x.loc) l)) in let mp = match a.map_kind with | A.MKMap -> M.MKMap | A.MKBigMap -> M.MKBigMap | A.MKIterableBigMap -> M.MKIterableBigMap in let r : M.asset = M.mk_asset (longident_to_mident a.name) ~keys:(List.map unloc (a.keys)) ~values:values ~sort:(List.map M.mk_mident a.sort) ~map_kind:mp ?state:a.state ~invariants:(List.map (fun x -> (to_label_lterm env) x) a.specs) ~init:(List.map (fun x -> (mk_asset (longident_to_mident a.name)) x) a.init) ~loc:a.loc in M.Dasset r in let process_record (r : A.record) : M.record = let rec for_pos (pos : A.position) : M.position = match pos with | A.Pleaf id -> M.Ptuple [unloc id] | A.Pnode n -> begin let is_all_leaf l = List.for_all (function | A.Pleaf _ -> true | _ -> false) l in match n with | [] -> M.Pnode [] | _ when is_all_leaf n -> M.Ptuple (List.map (function | A.Pleaf id -> (unloc id) | _ -> assert false) n) | _ -> begin let update_res accu res = match accu with | [] -> res | _ -> res @ [M.Ptuple accu] in let accu, res = List.fold_left (fun (accu, res) x -> match x with | A.Pleaf id -> (accu @ [unloc id], res) | _ -> begin let res = update_res accu res in ([], res @ [for_pos x]) end) ([], []) n in M.Pnode (update_res accu res) end end in let pos = for_pos r.pos in let fs : M.record_field list = List.map (fun (x : A.decl_gen) -> let typ = Option.get (Option.map type_to_type x.typ) in M.mk_record_field (M.mk_mident x.name) typ ~loc:x.loc) 
r.fields in M.mk_record (longident_to_mident r.name) ~fields:fs ~pos ~loc:r.loc in let rec to_instruction (env : env) (instr : A.instruction) : M.mterm = let is_empty_seq (instr : A.instruction) = match instr.node with | A.Iseq [] -> true | _ -> false in let node = let f = to_mterm env in let g = to_instruction env in let n : A.instruction_node = instr.node in match n with | A.Iif (c, t, e) when is_empty_seq e -> M.Mif (f c, g t, None) | A.Iif (c, t, e) -> M.Mif (f c, g t, Some (g e)) | A.Ifor (i, col, body) -> begin let ncol = let x = f col in match x.node, M.get_ntype x.type_ with | _, M.Tset _ -> M.ICKset x | _, M.Tlist _ -> M.ICKlist x | _, M.Tmap _ | _, M.Tbig_map _ | _, M.Titerable_big_map _ -> M.ICKmap x | _, M.Tcontainer ((Tasset an, _), Collection) -> M.ICKcoll (M.unloc_mident an) | M.Mdotassetfield (an, _k, fn), M.Tcontainer ((Tasset _, _), (Aggregate | Partition)) -> M.ICKfield (M.unloc_mident an, M.unloc_mident fn, x) | _ -> M.ICKview x in let i = match i with | A.FIsimple x -> M.FIsimple (M.mk_mident x) | A.FIdouble (x, y) -> M.FIdouble (M.mk_mident x, M.mk_mident y) in M.Mfor (i, ncol, g body, instr.label) end | A.Iiter (i, a, b, body) -> M.Miter (M.mk_mident i, f a, f b, g body, instr.label, false) | A.Iwhile (c, body) -> M.Mwhile (f c, g body, instr.label) | A.Iletin (i, init, cont) -> M.Mletin ([M.mk_mident i], LVsimple (f init), Option.map type_to_type init.type_, g cont, None) (* TODO *) | A.Ideclvar (i, v, c) -> M.Mdeclvar ([M.mk_mident i], Option.map type_to_type v.type_, f v, c) (* TODO *) | A.Ideclvaropt (i, v, fa, c)-> M.Mdeclvaropt ([M.mk_mident i], Option.map type_to_type (match v.type_ with | Some (A.Toption ty) -> Some ty | _ -> None), f v, Option.map f fa, c) (* TODO *) | A.Iseq l -> M.Mseq (List.map g l) | A.Imatchwith (m, l) -> M.Mmatchwith (f m, List.map (fun (p, i) -> (to_pattern p, g i)) l) | A.Imatchoption (x, id, ve, ne) -> M.Minstrmatchoption (f x, M.mk_mident id, g ve, g ne) | A.Imatchor (x, lid, le, rid, re) -> M.Minstrmatchor 
(f x, M.mk_mident lid, g le, M.mk_mident rid, g re) | A.Imatchlist (x, hid, tid, hte, ee) -> M.Minstrmatchlist (f x, M.mk_mident hid, M.mk_mident tid, g hte, g ee) | A.Iassign (op, t, lv, e, fa) -> begin let to_ak (lv : A.lvalue) = match lv with | `Var x -> (match unloc x with | "operations" -> M.Aoperations | _ -> M.Avar (M.mk_mident x)) | `Field (rn, o, fn) -> (match o.type_ with | Some (A.Trecord rn) -> M.Arecord(f o, to_mident rn, M.mk_mident fn) | _ -> M.Aasset (to_mident rn, M.mk_mident fn, f o)) | `Asset (an, k, fn) -> M.Aasset (to_mident an, M.mk_mident fn, f k) | `Tuple (lv, i, l) -> M.Atuple (f lv, i, l) in let e = f e in let t = type_to_type t in match fa with | Some fa -> M.Massignopt (to_assignment_operator op, t, to_ak lv, e, f fa) | None -> M.Massign (to_assignment_operator op, t, to_ak lv, e) end | A.Irequire (b, t, e) -> let cond : M.mterm = if b then term_not (f t) else (f t) in let e : M.mterm = f e in M.Mif (cond, fail (Invalid e), None) | A.Itransfer tr -> begin let tr = match tr with | TTsimple (v, d) -> M.TKsimple (f v, f d) | TTcontract (v, d, id, t, arg) -> M.TKcall (f v, unloc id, type_to_type t, f d, f arg) | TTentry (v, e, arg) -> M.TKentry (f v, f e, f arg) | TTgen (v, en, cn, t, e, arg) -> M.TKgen (f v, en, cn, type_to_type t, f e, f arg) | TTself (v, id, args) -> M.TKself (f v, unloc id, List.map (fun (id, v) -> unloc id, f v) args) | TToperation v -> M.TKoperation (f v) in M.Mtransfer tr end | A.Iemit (e, v) -> M.Memit (M.mk_mident e, f v) | A.Ireturn e -> M.Mreturn (f e) | A.Ilabel i -> M.Mlabel (M.mk_mident i) | A.Ifail m -> M.Mfail (Invalid (f m)) | A.Ifailsome v -> M.Mfailsome (f v) | A.Idetach (id, dk, ty, fa) -> let to_dk = function | A.DK_option (ty, id) -> M.DK_option (type_to_type ty, id) | A.DK_map (ty, id, k) -> M.DK_map (type_to_type ty, id, f k) in M.Mdetach (M.mk_mident id, to_dk dk, type_to_type ty, f fa) | A.Icall (i, Cid id, args) -> M.Mapp (M.mk_mident id, Option.map_dfl (fun v -> [f v]) [] i @ List.map 
(term_arg_to_expr f) args) | A.Icall (_, A.Cconst (A.Cfail), [AExpr p]) -> M.Mfail (Invalid (f p)) | A.Icall (Some p, A.Cconst (A.Cadd), [AExpr q]) when is_asset_container p -> ( let fp = f p in let fq = f q in match fp with | {node = M.Mvar (asset_name, Vstorecol, _, _); _} -> M.Maddasset (M.unloc_mident asset_name, fq) | {node = M.Mdotassetfield (asset_name , k, fn); _} -> M.Maddfield (M.unloc_mident asset_name, M.unloc_mident fn, k, fq) | _ -> assert false ) | A.Icall (Some p, A.Cconst (A.Cput), [AExpr q]) when is_asset_container p -> ( let fp = f p in let fq = f q in match fp with | {node = M.Mvar (asset_name, Vstorecol, _, _); _} -> M.Mputsingleasset (M.unloc_mident asset_name, fq) | _ -> assert false ) | A.Icall (Some p, A.Cconst (A.Cput), [AExpr k; AExpr v]) when is_asset_container p -> ( let fp = f p in let fk = f k in let fv = f v in match fp with | {node = M.Mvar (asset_name, Vstorecol, _, _); _} -> M.Mputasset (M.unloc_mident asset_name, fk, fv) | _ -> assert false ) | A.Icall (Some p, A.Cconst (A.Cremove), [AExpr q]) when is_asset_container p -> ( let fp = f p in let fq = f q in match fp with | {node = M.Mvar (asset_name, Vstorecol, _, _); _} -> M.Mremoveasset (M.unloc_mident asset_name, fq) | {node = M.Mdotassetfield (asset_name , k, fn); _} -> M.Mremovefield (M.unloc_mident asset_name, M.unloc_mident fn, k, fq) | _ -> assert false ) | A.Icall (Some p, A.Cconst (A.Cremoveall), []) when is_asset_container p -> ( let fp = f p in match fp with | {node = M.Mdotassetfield (an , k, fn); _} -> M.Mremoveall (M.unloc_mident an, CKfield (M.unloc_mident an, M.unloc_mident fn, k, Tnone, Dnone)) | {type_ = (M.Tcontainer ((Tasset an, _), _), _)} -> M.Mremoveall (M.unloc_mident an, to_ck env fp) | _ -> assert false ) | A.Icall (Some p, A.Cconst (A.Cremoveif), [AFun (_id, _type, l, q)]) -> let fp = f p in let lambda_body = f q in let lambda_args, args = List.fold_right (fun (x, y, z) (l1, l2) -> ((unloc x, type_to_type y)::l1, (f z)::l2)) l ([], []) in begin match 
fp.node, M.get_ntype fp.type_ with | Mdotassetfield (an, k, fn), _ -> M.Mremoveif (M.unloc_mident an, CKfield (M.unloc_mident an, M.unloc_mident fn, k, Tnone, Dnone), lambda_args, lambda_body, args) | _, Tcontainer ((Tasset an, _), _) -> M.Mremoveif (M.unloc_mident an, CKcoll (Tnone, Dnone), lambda_args, lambda_body, args) | _ -> assert false end | A.Icall (Some p, A.Cconst (A.Cclear), []) -> ( let fp = f p in begin match fp.node, M.get_ntype fp.type_ with | Mdotassetfield (an, k, fn), _ -> M.Mclear (M.unloc_mident an, CKfield (M.unloc_mident an, M.unloc_mident fn, k, Tnone, Dnone)) | _, Tcontainer ((Tasset an, _), _) -> M.Mclear (M.unloc_mident an, to_ck env fp) | _ -> assert false end ) | A.Icall (Some p, A.Cconst (A.CputRemove), [AExpr k; AExpr v]) -> ( let fp = f p in let fk = f k in let fv = f v in begin match fp.node, M.get_ntype fp.type_ with | _, Tcontainer ((Tasset an, _), _) -> M.Mputremove (M.unloc_mident an, to_ck env fp, fk, fv) | _ -> assert false end ) | A.Icall (Some p, A.Cconst (A.Caddupdate), [AExpr k; AEffect e]) when is_asset_container p -> let to_op = function | `Assign op -> to_assignment_operator op | _ -> emit_error (instr.loc, CannotConvertToAssignOperator); bailout () in let fp = f p in let fk = f k in let fe = List.map (fun (id, op, c) -> (M.mk_mident id, to_op op, f c)) e in begin match fp.node, M.get_ntype fp.type_ with | Mdotassetfield (_, _k, fn), Tcontainer ((Tasset an, _), (Aggregate | Partition)) -> M.Maddupdate (M.unloc_mident an, CKfield (M.unloc_mident an, M.unloc_mident fn, fp, Tnone, Dnone), fk, fe) | _, Tcontainer ((Tasset an, _), Collection) -> M.Maddupdate (M.unloc_mident an, CKcoll (Tnone, Dnone), fk, fe) | _ -> assert false end | A.Icall (Some p, A.Cconst (A.Cupdate), [AExpr k; AEffect e]) when is_asset_container p -> let to_op = function | `Assign op -> to_assignment_operator op | _ -> emit_error (instr.loc, CannotConvertToAssignOperator); bailout () in let fp = f p in let fk = f k in let fe = List.map (fun (id, op, c) 
-> (M.mk_mident id, to_op op, f c)) e in let asset_name = extract_asset_name fp in M.Mupdate (asset_name, fk, fe) | A.Icall (Some p, A.Cconst (A.Cupdateall), [AEffect e]) when is_asset_container p -> let to_op = function | `Assign op -> to_assignment_operator op | _ -> emit_error (instr.loc, CannotConvertToAssignOperator); bailout () in let fp = f p in let fe = List.map (fun (id, op, c) -> (M.mk_mident id, to_op op, f c)) e in begin match fp.node, M.get_ntype fp.type_ with | Mdotassetfield (_, _k, fn), Tcontainer ((Tasset an, _), (Aggregate | Partition)) -> M.Mupdateall (M.unloc_mident an, CKfield (M.unloc_mident an, M.unloc_mident fn, fp, Tnone, Dnone), fe) | _, Tcontainer ((Tasset an, _), View) -> M.Mupdateall (M.unloc_mident an, CKview fp, fe) | _, Tcontainer ((Tasset an, _), Collection) -> M.Mupdateall (M.unloc_mident an, CKcoll (Tnone, Dnone), fe) | _ -> assert false end | A.Icall (aux, A.Cconst c, args) -> Format.eprintf "instr const unkown: %a with nb args: %d [%a] %s@." A.pp_const c (List.length args) (Printer_tools.pp_list "; " (fun fmt (x : A.pterm_arg) -> let str = match x with | AExpr _ -> "AExpr" | AEffect _ -> "AEffect" | AFun _ -> "AFun" | ASorting _ -> "ASorting" | AIdent _ -> "AIdent" in Printer_tools.pp_str fmt str)) args (match aux with | Some _ -> "with aux" | _ -> "without aux"); assert false in M.mk_mterm node M.tunit ~loc:instr.loc in let to_predicate (env : env) (p : A.predicate) : M.predicate = M.mk_predicate (M.mk_mident p.name) (to_mterm { env with formula = true } p.body) ~args:(List.map (fun (id, type_) -> (M.mk_mident id, type_to_type type_)) p.args) ~loc:p.loc in let to_definition (env : env) (d : A.definition ): M.definition = let env = { env with formula = true } in M.mk_definition (M.mk_mident d.name) (type_to_type d.typ) (M.mk_mident d.var) (to_mterm env d.body) ~loc:d.loc in let to_fail (env : env) (p : A.fail) : M.fail = M.mk_fail (M.mk_mident p.label) (Option.map M.mk_mident p.fid) (M.mk_mident p.arg) (type_to_type p.atype) 
(to_mterm { env with formula = true } p.formula) ~loc:p.loc in let to_variable (env : env) (v : A.variable) : M.variable = M.mk_variable ((fun (arg : A.decl_gen) : (M.mident * M.type_ * M.mterm option) -> (M.mk_mident arg.name, type_to_type (Option.get arg.typ), Option.map (to_mterm env) arg.default)) v.decl) (to_variable_kind v.kind) ~loc:v.loc in let to_invariant (env : env) (i : A.invariant) :M.invariant = M.mk_invariant (M.mk_mident i.label) ~formulas:(List.map (to_mterm { env with formula = true }) i.formulas) in let to_postcondition (env : env) (s : A.postcondition) : M.postcondition = M.mk_postcondition (M.mk_mident s.name) Post (to_mterm { env with formula = true } s.formula) ~invariants:(List.map (to_invariant env) s.invariants) ~uses:(List.map M.mk_mident s.uses) in let to_assert (env : env) (s : A.assert_) : M.postcondition = M.mk_postcondition (M.mk_mident s.name) Assert (to_mterm env s.formula) ~invariants:(List.map (to_invariant env) s.invariants) ~uses:(List.map M.mk_mident s.uses) in let to_specification (env : env) (v : A.specification) : M.specification = let predicates = List.map (to_predicate env) v.predicates in let definitions = List.map (to_definition env) v.definitions in let lemmas = List.map (to_label_lterm env) v.lemmas in let theorems = List.map (to_label_lterm env) v.theorems in let fails = List.map (to_fail env) v.fails in let variables = List.map (fun x -> to_variable env x) v.variables in let invariants = List.map (fun (a, l) -> (M.mk_mident a, List.map (fun x -> to_label_lterm env x) l)) v.invariants in let effects = Option.map_dfl (fun x -> [to_instruction env x]) [] v.effect in let postconditions = List.map (to_postcondition env) v.specs @ List.map (to_assert env) v.asserts in M.mk_specification ~predicates:predicates ~definitions:definitions ~lemmas:lemmas ~theorems:theorems ~fails:fails ~variables:variables ~invariants:invariants ~effects:effects ~postconditions:postconditions ~loc:v.loc () in let cont_specification (env : env) 
(v : A.specification) (spec : M.specification) : M.specification = let v = to_specification env v in { spec with predicates = spec.predicates @ v.predicates; definitions = spec.definitions @ v.definitions; lemmas = spec.lemmas @ v.lemmas; theorems = spec.theorems @ v.theorems; variables = spec.variables @ v.variables; invariants = spec.invariants @ v.invariants; effects = spec.effects @ v.effects; postconditions = spec.postconditions @ v.postconditions; loc = Location.merge spec.loc v.loc; } in let cont_security (s : A.security) (sec : M.security) : M.security = let to_security_item (si : A.security_item) : M.security_item = let to_security_predicate (sn : A.security_predicate) : M.security_predicate = let to_security_node (sn : A.security_node) : M.security_node = let to_security_entry (sa : A.security_entry) : M.security_entry = match sa with | Sany -> Sany | Sentry l -> Sentry l in match sn with | SonlyByRole (ad, roles) -> SonlyByRole (to_entry_description ad, roles) | SonlyInEntry (ad, action) -> SonlyInEntry (to_entry_description ad, to_security_entry action) | SonlyByRoleInEntry (ad, roles, action) -> SonlyByRoleInEntry (to_entry_description ad, roles, to_security_entry action) | SnotByRole (ad, roles) -> SnotByRole (to_entry_description ad, roles) | SnotInEntry (ad, action) -> SnotInEntry (to_entry_description ad, to_security_entry action) | SnotByRoleInEntry (ad, roles, action) -> SnotByRoleInEntry (to_entry_description ad, roles, to_security_entry action) | StransferredBy (ad) -> StransferredBy (to_entry_description ad) | StransferredTo (ad) -> StransferredTo (to_entry_description ad) | SnoStorageFail (action) -> SnoStorageFail (to_security_entry action) in M.mk_security_predicate (to_security_node sn.s_node) ~loc:sn.loc in M.mk_security_item si.label (to_security_predicate si.predicate) ~loc:si.loc in let new_s : M.security = M.mk_security ~items:(List.map to_security_item s.items) ~loc:s.loc () in { items = sec.items @ new_s.items; loc = new_s.loc; } in 
let process_fun_gen name args (body : M.mterm) loc spec f : M.function__ = let node = f (M.mk_function_struct name body ~args:args ~loc:loc) in M.mk_function ?spec:spec node in let process_function (env : env) (function_ : A.function_) : M.function__ = let name = M.mk_mident function_.name in let args = List.map (fun (x : A.decl_gen) -> (M.mk_mident x.name, (type_to_type |@ Option.get) x.typ, None)) function_.args in let env = {env with function_p = Some (name, args); } in let body = to_instruction env function_.body in let loc = function_.loc in let ret = type_to_type function_.return in let spec : M.specification option = Option.map (to_specification env) function_.specification in let to_vv = function | A.VVonchain -> M.VVonchain | A.VVoffchain -> M.VVoffchain | A.VVonoffchain -> M.VVonoffchain in let f = match function_.kind with | FKfunction -> (fun x -> M.Function (x, ret)) | FKgetter -> (fun x -> M.Getter (x, ret)) | FKview vv -> (fun x -> M.View (x, ret, to_vv vv)) in process_fun_gen name args body loc spec f in let add_seq (s1 : M.mterm) (s2 : M.mterm) = let extract (s : M.mterm) = match s.node with M.Mseq l -> l | _ -> [s] in let l1 = extract s1 in let l2 = extract s2 in M.mk_mterm (M.Mseq (l1 @ l2)) M.tunit in let process_transaction (env : env) (transaction : A.transaction) : M.function__ = let process_calledby env (body : M.mterm) : M.mterm = let process_cb ((caller, fa) : M.mterm * M.fail_type) (cb : (A.rexpr * A.pterm option)) (body : M.mterm) : M.mterm = let rec process_rexpr (rq : A.rexpr) : M.mterm option = match rq.node with | Rany -> None | Rasset (_, a) -> begin (* FIXME: namespace *) let an = unloc a in Some (M.mk_mterm (M.Mcontains(an, CKcoll(Tnone, Dnone), caller)) M.tbool ~loc:(loc a)) end | Rexpr e -> begin let mt = to_mterm env e in Some (M.mk_mterm (M.Mequal (M.taddress, caller, mt)) (M.tbool) ~loc:rq.loc) end | Ror (l, r) -> begin let l = process_rexpr l in let r = process_rexpr r in match l, r with | Some l, Some r -> Some (M.mk_mterm 
(M.Mor (l, r)) (M.tbool) ~loc:rq.loc) | _ -> None end in let rexpr = fst cb in match process_rexpr rexpr with | Some a -> let require : M.mterm = M.mk_mterm (M.Mnot (a)) (M.tbool) ~loc:rexpr.loc in let fail_auth : M.mterm = match snd cb with | Some o -> fail (Invalid (to_mterm env o)) | None -> fail fa in let cond_if = M.mk_mterm (M.Mif (require, fail_auth, None)) M.tunit in add_seq cond_if body | _ -> body in begin let process tc caller body = match tc with | None -> body | Some cb -> process_cb caller cb body in body |> process transaction.calledby (M.mcaller, M.InvalidCaller) |> process transaction.sourcedby (M.msource, M.InvalidSource) end in let process_state_is env (body : M.mterm) : M.mterm = match transaction.state_is with | Some (id, o) -> begin let var = M.mk_state_value (M.mk_mident id) in let state = M.mk_state_var () in let c = M.mk_mterm (M.Mnequal (M.tstate, var, state)) (M.tbool) ~loc:(loc id) in let cond_if = let fail = match o with Some o -> fail (Invalid (to_mterm env o)) | None -> fail InvalidState in M.mk_mterm (M.Mif (c, fail, None)) M.tunit in add_seq cond_if body end | _ -> body in let process env b (x : A.label_term) (body : M.mterm) : M.mterm = let term = to_mterm env x.term in let cond : M.mterm = match b with | `Require -> M.mk_mterm (M.Mnot term) (M.tbool) ~loc:x.loc | `Failif -> term in let fail_cond : M.mterm = fail (M.InvalidCondition (x.label |> Option.get |> unloc, Option.map (to_mterm env) x.error)) in let cond_if = M.mk_mterm (M.Mif (cond, fail_cond, None)) M.tunit ~loc:x.loc in add_seq cond_if body in let apply env b li body = match li with | None -> body | Some l -> List.fold_right (fun (x : A.label_term) (accu : M.mterm) -> process env b x accu) l body in let apply_cst env li body = let process env (x : A.label_term) (body : M.mterm) : M.mterm = let id : M.lident = Option.get x.label in let value : M.mterm = to_mterm env x.term in let fa : M.mterm option = Option.map (to_mterm env) x.error in let node = match fa with | Some fa 
-> M.Mdeclvaropt([M.mk_mident id], Some value.type_, value, Some fa, true) | None -> M.Mdeclvar ([M.mk_mident id], Some value.type_, value, true) in let term = M.mk_mterm node M.tunit in add_seq term body in match li with | None -> body | Some l -> List.fold_right (fun (x : A.label_term) (accu : M.mterm) -> process env x accu) l body in let process_requires env (body : M.mterm) : M.mterm = body |> apply env `Failif transaction.failif |> apply env `Require transaction.require |> apply_cst env transaction.constants in let process_accept_transfer env (body : M.mterm) : M.mterm = if (not (fst transaction.accept_transfer)) then let lhs : M.mterm = M.mk_mterm (M.Mtransferred) M.ttez in let rhs : M.mterm = M.mk_mterm (M.Mcurrency (Big_int.zero_big_int, Utz)) M.ttez in let eq : M.mterm = M.mk_mterm (M.Mequal (M.ttez, lhs, rhs)) M.tbool in let cond : M.mterm = M.mk_mterm (M.Mnot eq) M.tbool in let fail = match snd transaction.accept_transfer with | Some o -> fail (Invalid (to_mterm env o)) | None -> fail NoTransfer in let cond_if : M.mterm = M.mk_mterm (M.Mif (cond, fail, None)) M.tunit in add_seq cond_if body else body in let process_body_args env : M.argument list * M.mterm * env = let args = List.map (fun (x : A.decl_gen) -> (M.mk_mident x.name, (type_to_type |@ Option.get) x.typ, None)) transaction.args in let env = {env with function_p = Some (M.mk_mident transaction.name, args); } in let empty : M.mterm = M.mk_mterm (M.Mseq []) M.tunit in match transaction.transition, transaction.effect with | None, None -> let body = empty in args, body, env | None, Some e -> let body = to_instruction env e in args, body, env | Some t, None -> let p_on = match t.on with | Some (key_ident, key_type, {pldesc = asset_name}, enum_type) -> Some (key_ident, type_to_type key_type, asset_name, type_to_type enum_type) | None -> None in let args = match p_on with | Some (ki, kt, _an, _) -> args @ [(M.mk_mident ki, kt, None)] | None -> args in let env = {env with function_p = Some (M.mk_mident 
transaction.name, args); } in let build_code (body : M.mterm) : M.mterm = (List.fold_right (fun ((id, cond, effect) : (A.lident * A.pterm option * A.instruction option)) (acc : M.mterm) : M.mterm -> let tre : M.mterm = match p_on with | Some (key_ident, key_type, an, enum_type) -> let k : M.mterm = build_mvar env (M.mk_mident key_ident) key_type ~loc:(Location.loc key_ident) in let et = match M.get_ntype enum_type with | M.Tenum id -> M.unloc_mident id | _ -> assert false in let v : M.mterm = M.mk_mterm (M.Menumval (M.mk_mident id, [], et)) enum_type ~loc:(Location.loc id) in M.mk_mterm (M.Massign (ValueAssign, v.type_, Aassetstate (an, k), v)) M.tunit | _ -> (* let v : M.mterm = build_mvar env id M.tstate ~loc:(Location.loc id) in *) let v : M.mterm = M.mk_mterm (Menumval (M.mk_mident id, [], "state")) (M.tenum (M.mk_mident (dumloc "state"))) ~loc:(Location.loc id) in M.mk_mterm (M.Massign (ValueAssign, v.type_, Astate, v)) M.tunit in let code : M.mterm = match effect with | Some e -> M.mk_mterm (M.Mseq [to_instruction env e; tre]) M.tunit | None -> tre in match cond with | Some c -> M.mk_mterm (M.Mif (to_mterm env c, code, Some acc)) M.tunit | None -> code ) t.trs body) in let body : M.mterm = build_code empty in let body = match t.from.node with | Sany -> body | _ -> begin let rec compute_patterns (a : A.sexpr) : M.pattern list = match a.node with | Sref id -> [M.mk_pattern (M.Pconst (M.mk_mident id, []))] | Sor (a, b) -> [a; b] |> List.map (fun x -> compute_patterns x) |> List.flatten | Sany -> emit_error (a.loc, AnyNotAuthorizedInTransitionTo); bailout () in let list_patterns : M.pattern list = compute_patterns t.from in let pattern : M.pattern = M.mk_pattern M.Pwild in let fail_instr : M.mterm = fail InvalidState in let w = match p_on with | Some (ki, kt, an, et) -> let k : M.mterm = build_mvar env (M.mk_mident ki) kt ~loc:(Location.loc ki) in M.mk_mterm (M.Mvar (M.mk_mident (dumloc an), Vassetstate k, Tnone, Dnone)) et | _ -> M.mk_mterm (M.Mvar(M.mk_mident 
(dumloc ""), Vstate, Tnone, Dnone)) M.tstate in M.mk_mterm (M.Mmatchwith (w, List.map (fun x -> (x, body)) list_patterns @ [pattern, fail_instr])) M.tunit end in args, body, env | _ -> emit_error (transaction.loc, CannotExtractBody); bailout () in (* let list = list |> cont process_function ast.functions in *) let args, body, env = process_body_args env in let body = body |> process_requires env |> process_accept_transfer env |> process_state_is env |> process_calledby env in let loc = transaction.loc in let spec : M.specification option = Option.map (to_specification env) transaction.specification in process_fun_gen (M.mk_mident transaction.name) args body loc spec (fun x -> M.Entry x) in let process_parameter env (p : A.parameter) : M.parameter = { name = M.mk_mident p.name; typ = type_to_type p.typ; default = Option.map (to_mterm env) p.default; value = Option.map (to_mterm env) p.value; const = p.const; loc = p.loc; } in let process_import (i : A.import_struct) : M.import = M.{ name = i.name; path = i.path; kind_node = (match i.kind_node with | A.INMichelson { ms_content } -> M.INMichelson { ms_content } | A.INArchetype -> M.INArchetype); views = List.map (fun (x, (y, z)) -> (x, (type_to_type y, type_to_type z))) i.views; entrypoints = List.map (fun (x, y) -> (x, type_to_type y)) i.entrypoints; } in let process_decl_ (env : env) = function | A.Dvariable v -> process_var env v | A.Dasset a -> process_asset env a | A.Drecord r -> M.Drecord (process_record r) | A.Denum e -> process_enum env e | A.Devent e -> M.Devent (process_record e) in let process_fun_ (env : env) = function | A.Ffunction f -> process_function env f | A.Ftransaction t -> process_transaction env t in let name = ast.name in let env = mk_env () in let parameters = List.map (process_parameter env) ast.parameters in let imports = List.map process_import ast.imports in let metadata = Option.map (function | A.MKuri x -> M.MKuri x | A.MKjson x -> M.MKjson x) ast.metadata in let decls = List.map 
(process_decl_ env) ast.decls in let functions = List.map (process_fun_ env) ast.funs in let specification = M.mk_specification () |> (fun spec -> List.fold_left (fun accu x -> cont_specification env x accu) spec ast.specifications) in let security = M.mk_security () |> (fun sec -> List.fold_left (fun accu x -> cont_security x accu) sec ast.securities) in M.mk_model ~parameters ~imports ?metadata ~decls ~functions ~specification ~security ~loc:ast.loc name
shrinker.mli
(** Shrinkers produce small values from large values. When a random test case
    fails, a shrinker finds the simplest version of the problem. *)

open! Base

(* A shrinker for values of type ['a].  Abstract here; build one with the
   combinators below or, at the lowest level, with [create]. *)
type 'a t

(** {2 Basic Shrinkers} *)

(** This shrinker treats a type as atomic, never attempting to produce smaller
    values. *)
val atomic : _ t

include With_basic_types.S with type 'a t := 'a t (** @inline *)

(* Shrinkers for Base map/set containers, built from shrinkers for their
   keys/elements (and data, for maps). *)
val map_t : 'key t -> 'data t -> ('key, 'data, 'cmp) Map.t t
val set_t : 'elt t -> ('elt, 'cmp) Set.t t

(* Tree variants take the comparator explicitly, since
   [Map.Using_comparator.Tree.t] / [Set.Using_comparator.Tree.t] do not
   carry one at runtime. *)
val map_tree_using_comparator
  :  comparator:('key, 'cmp) Comparator.t
  -> 'key t
  -> 'data t
  -> ('key, 'data, 'cmp) Map.Using_comparator.Tree.t t

val set_tree_using_comparator
  :  comparator:('elt, 'cmp) Comparator.t
  -> 'elt t
  -> ('elt, 'cmp) Set.Using_comparator.Tree.t t

(** {2 Modifying Shrinkers} *)

(* [map] needs both directions: [f_inverse] carries a ['b] back to the
   underlying ['a] shrinker, [f] maps each shrunk candidate forward. *)
val map : 'a t -> f:('a -> 'b) -> f_inverse:('b -> 'a) -> 'b t
val filter : 'a t -> f:('a -> bool) -> 'a t

(** Filters and maps according to [f], and provides input to [t] via
    [f_inverse]. Only the [f] direction produces options, intentionally. *)
val filter_map : 'a t -> f:('a -> 'b option) -> f_inverse:('b -> 'a) -> 'b t

(** {2 Shrinkers for Recursive Types} *)

(** Ties the recursive knot to shrink recursive types.

    For example, here is a shrinker for binary trees:

    {[
      let tree_shrinker leaf_shrinker =
        fixed_point (fun self ->
          either leaf_shrinker (both self self)
          |> map
               ~f:(function
                 | First leaf -> `Leaf leaf
                 | Second (l, r) -> `Node (l, r))
               ~f_inverse:(function
                 | `Leaf leaf -> First leaf
                 | `Node (l, r) -> Second (l, r)))
    ]} *)
val fixed_point : ('a t -> 'a t) -> 'a t

(** Creates a [t] that forces the lazy argument as necessary. Can be used to
    tie (mutually) recursive knots. *)
val of_lazy : 'a t Lazy.t -> 'a t

(** {2 Low-level functions}

    Most users will not need to call these. *)

(* [create f] wraps a raw shrink function; [shrink t x] is the sequence of
   smaller candidates that [t] proposes for [x]. *)
val create : ('a -> 'a Sequence.t) -> 'a t
val shrink : 'a t -> 'a -> 'a Sequence.t
(** Shrinkers produce small values from large values. When a random test case fails, a shrinker finds the simplest version of the problem. *)
dune
; Shim generator: mkshims links against dune.configurator and prints
; generated OCaml code on stdout (see mkshims.ml for what it probes).
(executable
 (name mkshims)
 (modules mkshims)
 (libraries dune.configurator))

; Materialize the generator's stdout as the Iter_shims_ module.
(rule
 (targets Iter_shims_.ml)
 (deps mkshims.exe)
 (action
  (with-stdout-to %{targets}
   (run ./mkshims.exe))))

; The iter library itself.  (wrapped false) exposes Iter, IterLabels and
; Iter_shims_ as top-level module names rather than under an Iter__ prefix.
(library
 (name iter)
 (public_name iter)
 (wrapped false)
 (modules Iter IterLabels Iter_shims_)
 (flags :standard -w +a -warn-error -a+8 -nolabels)
 (libraries bytes result seq))

; Directory-wide defaults for every build context (_): enable all warnings,
; make only warning 8 (non-exhaustive match) fatal via -warn-error -a+8,
; and pass aggressive flambda/closure options to the native compiler.
(env
 (_
  (flags :standard -w +a -warn-error -a+8 -strict-sequence)
  (ocamlopt_flags :standard -O3 -unbox-closures -unbox-closures-factor 20)))
stubs_Gio.ml
(* OCaml FFI bindings for GIO objects.  Every [external] maps one GIO C
   function to a C stub named "ml_g_*"; the uniform naming and structure
   suggest this file is auto-generated (NOTE(review): locate the generator
   and regenerate rather than hand-editing -- confirm).
   [-'a obj] is a contravariant phantom-typed handle: row types such as
   [[>`gsocket] obj] (argument: at least this class) and [[<`gsocket] obj]
   (result: at most this class) encode the GObject class hierarchy.
   The empty *Class / *Private / *Iface modules are placeholders mirroring
   the corresponding C struct types, with no bound functions. *)
type -'a obj

module ZlibDecompressorClass = struct end
module ZlibDecompressor = struct
  external get_file_info: [>`gzlibdecompressor] obj -> [<`gfileinfo] obj = "ml_g_zlib_decompressor_get_file_info"
end

module ZlibCompressorClass = struct end
module ZlibCompressor = struct
  (* [set_file_info] takes an option: None clears the association (maps to
     NULL in the stub -- presumably; verify in the C code). *)
  external set_file_info: [>`gzlibcompressor] obj -> [>`gfileinfo] obj option -> unit = "ml_g_zlib_compressor_set_file_info"
  external get_file_info: [>`gzlibcompressor] obj -> [<`gfileinfo] obj = "ml_g_zlib_compressor_get_file_info"
end

module VolumeMonitorClass = struct end
module VolumeMonitor = struct
  (* The three accessors return raw GList handles, not OCaml lists. *)
  external get_volumes: [>`gvolumemonitor] obj -> [<`glist] obj = "ml_g_volume_monitor_get_volumes"
  external get_mounts: [>`gvolumemonitor] obj -> [<`glist] obj = "ml_g_volume_monitor_get_mounts"
  external get_connected_drives: [>`gvolumemonitor] obj -> [<`glist] obj = "ml_g_volume_monitor_get_connected_drives"
  external get: unit -> [<`gvolumemonitor] obj = "ml_g_volume_monitor_get"
end

module VolumeIface = struct end

module VfsClass = struct end
module Vfs = struct
  external is_active: [>`gvfs] obj -> bool = "ml_g_vfs_is_active"
  external get_local: unit -> [<`gvfs] obj = "ml_g_vfs_get_local"
  external get_default: unit -> [<`gvfs] obj = "ml_g_vfs_get_default"
end

module UnixSocketAddressPrivate = struct end
module UnixSocketAddressClass = struct end
module UnixSocketAddress = struct
  external get_path_len: [>`gunixsocketaddress] obj -> int = "ml_g_unix_socket_address_get_path_len"
  external get_path: [>`gunixsocketaddress] obj -> string = "ml_g_unix_socket_address_get_path"
  external get_is_abstract: [>`gunixsocketaddress] obj -> bool = "ml_g_unix_socket_address_get_is_abstract"
  external abstract_names_supported: unit -> bool = "ml_g_unix_socket_address_abstract_names_supported"
end

module UnixOutputStreamPrivate = struct end
module UnixOutputStreamClass = struct end
module UnixOutputStream = struct
  external set_close_fd: [>`gunixoutputstream] obj -> bool -> unit = "ml_g_unix_output_stream_set_close_fd"
  external get_fd: [>`gunixoutputstream] obj -> int = "ml_g_unix_output_stream_get_fd"
  external get_close_fd: [>`gunixoutputstream] obj -> bool = "ml_g_unix_output_stream_get_close_fd"
end

module UnixMountPoint = struct
  external is_user_mountable: [>`gunixmountpoint] obj -> bool = "ml_g_unix_mount_point_is_user_mountable"
  external is_readonly: [>`gunixmountpoint] obj -> bool = "ml_g_unix_mount_point_is_readonly"
  external is_loopback: [>`gunixmountpoint] obj -> bool = "ml_g_unix_mount_point_is_loopback"
  external guess_name: [>`gunixmountpoint] obj -> string = "ml_g_unix_mount_point_guess_name"
  external guess_can_eject: [>`gunixmountpoint] obj -> bool = "ml_g_unix_mount_point_guess_can_eject"
  external get_mount_path: [>`gunixmountpoint] obj -> string = "ml_g_unix_mount_point_get_mount_path"
  external get_fs_type: [>`gunixmountpoint] obj -> string = "ml_g_unix_mount_point_get_fs_type"
  external get_device_path: [>`gunixmountpoint] obj -> string = "ml_g_unix_mount_point_get_device_path"
  (* NOTE(review): [free]/[copy] expose manual C-side lifetime management;
     using a handle after [free] is a use-after-free in the stub layer. *)
  external free: [>`gunixmountpoint] obj -> unit = "ml_g_unix_mount_point_free"
  external compare: [>`gunixmountpoint] obj -> [>`gunixmountpoint] obj -> int = "ml_g_unix_mount_point_compare"
end

module UnixMountMonitorClass = struct end
module UnixMountMonitor = struct
  external set_rate_limit: [>`gunixmountmonitor] obj -> int -> unit = "ml_g_unix_mount_monitor_set_rate_limit"
end

module UnixMountEntry = struct end

module UnixInputStreamPrivate = struct end
module UnixInputStreamClass = struct end
module UnixInputStream = struct
  external set_close_fd: [>`gunixinputstream] obj -> bool -> unit = "ml_g_unix_input_stream_set_close_fd"
  external get_fd: [>`gunixinputstream] obj -> int = "ml_g_unix_input_stream_get_fd"
  external get_close_fd: [>`gunixinputstream] obj -> bool = "ml_g_unix_input_stream_get_close_fd"
end

module UnixFDMessagePrivate = struct end
module UnixFDMessageClass = struct end
module UnixFDMessage = struct
  external get_fd_list: [>`gunixfdmessage] obj -> [<`gunixfdlist] obj = "ml_g_unix_fd_message_get_fd_list"
end

module UnixFDListPrivate = struct end
module UnixFDListClass = struct end
module UnixFDList = struct
  external get_length: [>`gunixfdlist] obj -> int = "ml_g_unix_fd_list_get_length"
end

module UnixCredentialsMessagePrivate = struct end
module UnixCredentialsMessageClass = struct end
module UnixCredentialsMessage = struct
  external get_credentials: [>`gunixcredentialsmessage] obj -> [<`gcredentials] obj = "ml_g_unix_credentials_message_get_credentials"
  external is_supported: unit -> bool = "ml_g_unix_credentials_message_is_supported"
end

module UnixConnectionPrivate = struct end
module UnixConnectionClass = struct end
module UnixConnection = struct end

module TlsServerContext = struct end
module TlsServerConnectionInterface = struct end
module TlsContext = struct end

module TlsConnectionPrivate = struct end
module TlsConnectionClass = struct end
module TlsConnection = struct
  external set_use_system_certdb: [>`gtlsconnection] obj -> bool -> unit = "ml_g_tls_connection_set_use_system_certdb"
  external set_require_close_notify: [>`gtlsconnection] obj -> bool -> unit = "ml_g_tls_connection_set_require_close_notify"
  external set_certificate: [>`gtlsconnection] obj -> [>`gtlscertificate] obj -> unit = "ml_g_tls_connection_set_certificate"
  external get_use_system_certdb: [>`gtlsconnection] obj -> bool = "ml_g_tls_connection_get_use_system_certdb"
  external get_require_close_notify: [>`gtlsconnection] obj -> bool = "ml_g_tls_connection_get_require_close_notify"
  external get_peer_certificate: [>`gtlsconnection] obj -> [<`gtlscertificate] obj = "ml_g_tls_connection_get_peer_certificate"
  external get_certificate: [>`gtlsconnection] obj -> [<`gtlscertificate] obj = "ml_g_tls_connection_get_certificate"
end

module TlsClientContext = struct end
module TlsClientConnectionInterface = struct end

module TlsCertificatePrivate = struct end
module TlsCertificateClass = struct end
module TlsCertificate = struct
  external get_issuer: [>`gtlscertificate] obj -> [<`gtlscertificate] obj = "ml_g_tls_certificate_get_issuer"
end

module TlsBackendInterface = struct end

module ThreadedSocketServicePrivate = struct end
module ThreadedSocketServiceClass = struct end
module ThreadedSocketService = struct end

module ThemedIconClass = struct end
module ThemedIcon = struct
  external prepend_name: [>`gthemedicon] obj -> string -> unit = "ml_g_themed_icon_prepend_name"
  external append_name: [>`gthemedicon] obj -> string -> unit = "ml_g_themed_icon_append_name"
end

module TcpWrapperConnectionPrivate = struct end
module TcpWrapperConnectionClass = struct end
module TcpWrapperConnection = struct
  external get_base_io_stream: [>`gtcpwrapperconnection] obj -> [<`giostream] obj = "ml_g_tcp_wrapper_connection_get_base_io_stream"
end

module TcpConnectionPrivate = struct end
module TcpConnectionClass = struct end
module TcpConnection = struct
  external set_graceful_disconnect: [>`gtcpconnection] obj -> bool -> unit = "ml_g_tcp_connection_set_graceful_disconnect"
  external get_graceful_disconnect: [>`gtcpconnection] obj -> bool = "ml_g_tcp_connection_get_graceful_disconnect"
end

module SrvTarget = struct
  external get_weight: [>`gsrvtarget] obj -> int = "ml_g_srv_target_get_weight"
  external get_priority: [>`gsrvtarget] obj -> int = "ml_g_srv_target_get_priority"
  external get_port: [>`gsrvtarget] obj -> int = "ml_g_srv_target_get_port"
  external get_hostname: [>`gsrvtarget] obj -> string = "ml_g_srv_target_get_hostname"
  (* Manual lifetime management, as in UnixMountPoint above. *)
  external free: [>`gsrvtarget] obj -> unit = "ml_g_srv_target_free"
  external copy: [>`gsrvtarget] obj -> [<`gsrvtarget] obj = "ml_g_srv_target_copy"
end

module SocketServicePrivate = struct end
module SocketServiceClass = struct end
module SocketService = struct
  external stop: [>`gsocketservice] obj -> unit = "ml_g_socket_service_stop"
  external start: [>`gsocketservice] obj -> unit = "ml_g_socket_service_start"
  external is_active: [>`gsocketservice] obj -> bool = "ml_g_socket_service_is_active"
end

module SocketPrivate = struct end
(* Generated low-level bindings: each [external] maps an OCaml function
   directly onto a C stub (the quoted "ml_g_..." symbol).  The stub names
   follow the GLib/GIO C API (g_socket_listener_*, g_settings_*,
   g_dbus_message_*, ...), so these appear to be GIO bindings — the [obj]
   type and the polymorphic-variant tags ([>`gsocket] etc.) come from the
   surrounding GObject support library.  Empty [struct end] modules are
   placeholders for C types that expose no bound methods. *)

module SocketListenerPrivate = struct end
module SocketListenerClass = struct end

(* Listener socket: backlog control and shutdown. *)
module SocketListener = struct
  external set_backlog: [>`gsocketlistener] obj -> int -> unit = "ml_g_socket_listener_set_backlog"
  external close: [>`gsocketlistener] obj -> unit = "ml_g_socket_listener_close"
end

module SocketControlMessagePrivate = struct end
module SocketControlMessageClass = struct end

(* Ancillary-data (control) messages carried over sockets. *)
module SocketControlMessage = struct
  external get_size: [>`gsocketcontrolmessage] obj -> int = "ml_g_socket_control_message_get_size"
  external get_msg_type: [>`gsocketcontrolmessage] obj -> int = "ml_g_socket_control_message_get_msg_type"
  external get_level: [>`gsocketcontrolmessage] obj -> int = "ml_g_socket_control_message_get_level"
end

module SocketConnectionPrivate = struct end
module SocketConnectionClass = struct end

module SocketConnection = struct
  external get_socket: [>`gsocketconnection] obj -> [<`gsocket] obj = "ml_g_socket_connection_get_socket"
end

module SocketConnectableIface = struct end
module SocketClientPrivate = struct end
module SocketClientClass = struct end

(* High-level client connection helper: TLS, timeout, proxying, local
   address selection.  Getters mirror the setters one-for-one. *)
module SocketClient = struct
  external set_tls: [>`gsocketclient] obj -> bool -> unit = "ml_g_socket_client_set_tls"
  external set_timeout: [>`gsocketclient] obj -> int -> unit = "ml_g_socket_client_set_timeout"
  external set_local_address: [>`gsocketclient] obj -> [>`gsocketaddress] obj -> unit = "ml_g_socket_client_set_local_address"
  external set_enable_proxy: [>`gsocketclient] obj -> bool -> unit = "ml_g_socket_client_set_enable_proxy"
  external get_tls: [>`gsocketclient] obj -> bool = "ml_g_socket_client_get_tls"
  external get_timeout: [>`gsocketclient] obj -> int = "ml_g_socket_client_get_timeout"
  external get_local_address: [>`gsocketclient] obj -> [<`gsocketaddress] obj = "ml_g_socket_client_get_local_address"
  external get_enable_proxy: [>`gsocketclient] obj -> bool = "ml_g_socket_client_get_enable_proxy"
  external add_application_proxy: [>`gsocketclient] obj -> string -> unit = "ml_g_socket_client_add_application_proxy"
end

module SocketClass = struct end
module SocketAddressEnumeratorClass = struct end
module SocketAddressEnumerator = struct end
module SocketAddressClass = struct end

module SocketAddress = struct
  external get_native_size: [>`gsocketaddress] obj -> int = "ml_g_socket_address_get_native_size"
end

(* Raw socket object: option accessors plus connection-factory entry point. *)
module Socket = struct
  external speaks_ipv4: [>`gsocket] obj -> bool = "ml_g_socket_speaks_ipv4"
  external set_timeout: [>`gsocket] obj -> int -> unit = "ml_g_socket_set_timeout"
  external set_listen_backlog: [>`gsocket] obj -> int -> unit = "ml_g_socket_set_listen_backlog"
  external set_keepalive: [>`gsocket] obj -> bool -> unit = "ml_g_socket_set_keepalive"
  external set_blocking: [>`gsocket] obj -> bool -> unit = "ml_g_socket_set_blocking"
  external is_connected: [>`gsocket] obj -> bool = "ml_g_socket_is_connected"
  external is_closed: [>`gsocket] obj -> bool = "ml_g_socket_is_closed"
  external get_timeout: [>`gsocket] obj -> int = "ml_g_socket_get_timeout"
  external get_listen_backlog: [>`gsocket] obj -> int = "ml_g_socket_get_listen_backlog"
  external get_keepalive: [>`gsocket] obj -> bool = "ml_g_socket_get_keepalive"
  external get_fd: [>`gsocket] obj -> int = "ml_g_socket_get_fd"
  external get_blocking: [>`gsocket] obj -> bool = "ml_g_socket_get_blocking"
  external connection_factory_create_connection: [>`gsocket] obj -> [<`gsocketconnection] obj = "ml_g_socket_connection_factory_create_connection"
end

module SimplePermission = struct end
module SimpleAsyncResultClass = struct end

(* Async-result helper: stores an operation outcome (gssize/boolean/error)
   and completes it either immediately or from an idle callback. *)
module SimpleAsyncResult = struct
  external take_error: [>`gsimpleasyncresult] obj -> [>`gerror] obj -> unit = "ml_g_simple_async_result_take_error"
  external set_op_res_gssize: [>`gsimpleasyncresult] obj -> int -> unit = "ml_g_simple_async_result_set_op_res_gssize"
  external set_op_res_gboolean: [>`gsimpleasyncresult] obj -> bool -> unit = "ml_g_simple_async_result_set_op_res_gboolean"
  external set_handle_cancellation: [>`gsimpleasyncresult] obj -> bool -> unit = "ml_g_simple_async_result_set_handle_cancellation"
  external set_from_error: [>`gsimpleasyncresult] obj -> [>`gerror] obj -> unit = "ml_g_simple_async_result_set_from_error"
  external get_op_res_gssize: [>`gsimpleasyncresult] obj -> int = "ml_g_simple_async_result_get_op_res_gssize"
  external get_op_res_gboolean: [>`gsimpleasyncresult] obj -> bool = "ml_g_simple_async_result_get_op_res_gboolean"
  external complete_in_idle: [>`gsimpleasyncresult] obj -> unit = "ml_g_simple_async_result_complete_in_idle"
  external complete: [>`gsimpleasyncresult] obj -> unit = "ml_g_simple_async_result_complete"
end

module SimpleActionPrivate = struct end
module SimpleActionGroupPrivate = struct end
module SimpleActionGroupClass = struct end

module SimpleActionGroup = struct
  external remove: [>`gsimpleactiongroup] obj -> string -> unit = "ml_g_simple_action_group_remove"
end

module SimpleActionClass = struct end

module SimpleAction = struct
  external set_enabled: [>`gsimpleaction] obj -> bool -> unit = "ml_g_simple_action_set_enabled"
end

module SettingsPrivate = struct end
module SettingsClass = struct end
module SettingsBackend = struct end

(* GSettings key/value store.  Typed setters return bool (presumably
   success/failure of the write — TODO confirm against the C API);
   [delay]/[apply]/[revert] control delayed-write mode. *)
module Settings = struct
  external set_value: [>`gsettings] obj -> string -> [>`gvariant] obj -> bool = "ml_g_settings_set_value"
  external set_string: [>`gsettings] obj -> string -> string -> bool = "ml_g_settings_set_string"
  external set_int: [>`gsettings] obj -> string -> int -> bool = "ml_g_settings_set_int"
  external set_flags: [>`gsettings] obj -> string -> int -> bool = "ml_g_settings_set_flags"
  external set_enum: [>`gsettings] obj -> string -> int -> bool = "ml_g_settings_set_enum"
  external set_double: [>`gsettings] obj -> string -> float -> bool = "ml_g_settings_set_double"
  external set_boolean: [>`gsettings] obj -> string -> bool -> bool = "ml_g_settings_set_boolean"
  external revert: [>`gsettings] obj -> unit = "ml_g_settings_revert"
  external reset: [>`gsettings] obj -> string -> unit = "ml_g_settings_reset"
  external range_check: [>`gsettings] obj -> string -> [>`gvariant] obj -> bool = "ml_g_settings_range_check"
  external is_writable: [>`gsettings] obj -> string -> bool = "ml_g_settings_is_writable"
  external get_value: [>`gsettings] obj -> string -> [<`gvariant] obj = "ml_g_settings_get_value"
  external get_string: [>`gsettings] obj -> string -> string = "ml_g_settings_get_string"
  external get_range: [>`gsettings] obj -> string -> [<`gvariant] obj = "ml_g_settings_get_range"
  external get_int: [>`gsettings] obj -> string -> int = "ml_g_settings_get_int"
  external get_has_unapplied: [>`gsettings] obj -> bool = "ml_g_settings_get_has_unapplied"
  external get_flags: [>`gsettings] obj -> string -> int = "ml_g_settings_get_flags"
  external get_enum: [>`gsettings] obj -> string -> int = "ml_g_settings_get_enum"
  external get_double: [>`gsettings] obj -> string -> float = "ml_g_settings_get_double"
  external get_child: [>`gsettings] obj -> string -> [<`gsettings] obj = "ml_g_settings_get_child"
  external get_boolean: [>`gsettings] obj -> string -> bool = "ml_g_settings_get_boolean"
  external delay: [>`gsettings] obj -> unit = "ml_g_settings_delay"
  external apply: [>`gsettings] obj -> unit = "ml_g_settings_apply"
  (* [sync] is global (takes unit), unlike the per-object calls above. *)
  external sync: unit -> unit = "ml_g_settings_sync"
end

module SeekableIface = struct end
module ResolverPrivate = struct end
module ResolverClass = struct end

(* DNS resolver singleton; the free_* helpers release C-side lists. *)
module Resolver = struct
  external set_default: [>`gresolver] obj -> unit = "ml_g_resolver_set_default"
  external get_default: unit -> [<`gresolver] obj = "ml_g_resolver_get_default"
  external free_targets: [>`glist] obj -> unit = "ml_g_resolver_free_targets"
  external free_addresses: [>`glist] obj -> unit = "ml_g_resolver_free_addresses"
end

module ProxyResolverInterface = struct end
module ProxyInterface = struct end
module ProxyAddressPrivate = struct end
module ProxyAddressEnumeratorPrivate = struct end
module ProxyAddressEnumeratorClass = struct end
module ProxyAddressEnumerator = struct end
module ProxyAddressClass = struct end

(* Proxy endpoint: credentials, protocol and final destination. *)
module ProxyAddress = struct
  external get_username: [>`gproxyaddress] obj -> string = "ml_g_proxy_address_get_username"
  external get_protocol: [>`gproxyaddress] obj -> string = "ml_g_proxy_address_get_protocol"
  external get_password: [>`gproxyaddress] obj -> string = "ml_g_proxy_address_get_password"
  external get_destination_port: [>`gproxyaddress] obj -> int = "ml_g_proxy_address_get_destination_port"
  external get_destination_hostname: [>`gproxyaddress] obj -> string = "ml_g_proxy_address_get_destination_hostname"
end

module PollableOutputStreamInterface = struct end
module PollableInputStreamInterface = struct end
module PermissionPrivate = struct end
module PermissionClass = struct end

(* Permission object; impl_update takes (allowed, can_acquire, can_release)
   — presumably in that order, matching the three getters; TODO confirm. *)
module Permission = struct
  external impl_update: [>`gpermission] obj -> bool -> bool -> bool -> unit = "ml_g_permission_impl_update"
  external get_can_release: [>`gpermission] obj -> bool = "ml_g_permission_get_can_release"
  external get_can_acquire: [>`gpermission] obj -> bool = "ml_g_permission_get_can_acquire"
  external get_allowed: [>`gpermission] obj -> bool = "ml_g_permission_get_allowed"
end

module OutputVector = struct end
module OutputStreamPrivate = struct end
module OutputStreamClass = struct end

module OutputStream = struct
  external is_closing: [>`goutputstream] obj -> bool = "ml_g_output_stream_is_closing"
  external is_closed: [>`goutputstream] obj -> bool = "ml_g_output_stream_is_closed"
  external has_pending: [>`goutputstream] obj -> bool = "ml_g_output_stream_has_pending"
  external clear_pending: [>`goutputstream] obj -> unit = "ml_g_output_stream_clear_pending"
end

module NetworkServicePrivate = struct end
module NetworkServiceClass = struct end

(* SRV-record network service description. *)
module NetworkService = struct
  external set_scheme: [>`gnetworkservice] obj -> string -> unit = "ml_g_network_service_set_scheme"
  external get_service: [>`gnetworkservice] obj -> string = "ml_g_network_service_get_service"
  external get_scheme: [>`gnetworkservice] obj -> string = "ml_g_network_service_get_scheme"
  external get_protocol: [>`gnetworkservice] obj -> string = "ml_g_network_service_get_protocol"
  external get_domain: [>`gnetworkservice] obj -> string = "ml_g_network_service_get_domain"
end

module NetworkAddressPrivate = struct end
module NetworkAddressClass = struct end

module NetworkAddress = struct
  external get_scheme: [>`gnetworkaddress] obj -> string = "ml_g_network_address_get_scheme"
  external get_port: [>`gnetworkaddress] obj -> int = "ml_g_network_address_get_port"
  external get_hostname: [>`gnetworkaddress] obj -> string = "ml_g_network_address_get_hostname"
end

module NativeVolumeMonitorClass = struct end
module NativeVolumeMonitor = struct end
module MountOperationPrivate = struct end
module MountOperationClass = struct end

(* Interactive mount authentication state (username/password/domain/...). *)
module MountOperation = struct
  external set_username: [>`gmountoperation] obj -> string -> unit = "ml_g_mount_operation_set_username"
  external set_password: [>`gmountoperation] obj -> string -> unit = "ml_g_mount_operation_set_password"
  external set_domain: [>`gmountoperation] obj -> string -> unit = "ml_g_mount_operation_set_domain"
  external set_choice: [>`gmountoperation] obj -> int -> unit = "ml_g_mount_operation_set_choice"
  external set_anonymous: [>`gmountoperation] obj -> bool -> unit = "ml_g_mount_operation_set_anonymous"
  external get_username: [>`gmountoperation] obj -> string = "ml_g_mount_operation_get_username"
  external get_password: [>`gmountoperation] obj -> string = "ml_g_mount_operation_get_password"
  external get_domain: [>`gmountoperation] obj -> string = "ml_g_mount_operation_get_domain"
  external get_choice: [>`gmountoperation] obj -> int = "ml_g_mount_operation_get_choice"
  external get_anonymous: [>`gmountoperation] obj -> bool = "ml_g_mount_operation_get_anonymous"
end

module MountIface = struct end
module MemoryOutputStreamPrivate = struct end
module MemoryOutputStreamClass = struct end

module MemoryOutputStream = struct
  external get_size: [>`gmemoryoutputstream] obj -> int = "ml_g_memory_output_stream_get_size"
  external get_data_size: [>`gmemoryoutputstream] obj -> int = "ml_g_memory_output_stream_get_data_size"
end

module MemoryInputStreamPrivate = struct end
module MemoryInputStreamClass = struct end
module MemoryInputStream = struct end
module LoadableIconIface = struct end
module InputVector = struct end
module InputStreamPrivate = struct end
module InputStreamClass = struct end

module InputStream = struct
  external is_closed: [>`ginputstream] obj -> bool = "ml_g_input_stream_is_closed"
  external has_pending: [>`ginputstream] obj -> bool = "ml_g_input_stream_has_pending"
  external clear_pending: [>`ginputstream] obj -> unit = "ml_g_input_stream_clear_pending"
end

module InitableIface = struct end
module InetSocketAddressPrivate = struct end
module InetSocketAddressClass = struct end

module InetSocketAddress = struct
  external get_port: [>`ginetsocketaddress] obj -> int = "ml_g_inet_socket_address_get_port"
  external get_address: [>`ginetsocketaddress] obj -> [<`ginetaddress] obj = "ml_g_inet_socket_address_get_address"
end

module InetAddressPrivate = struct end
module InetAddressClass = struct end

(* IP address predicates (loopback / multicast scopes / link-local / any). *)
module InetAddress = struct
  external to_string: [>`ginetaddress] obj -> string = "ml_g_inet_address_to_string"
  external get_native_size: [>`ginetaddress] obj -> int = "ml_g_inet_address_get_native_size"
  external get_is_site_local: [>`ginetaddress] obj -> bool = "ml_g_inet_address_get_is_site_local"
  external get_is_multicast: [>`ginetaddress] obj -> bool = "ml_g_inet_address_get_is_multicast"
  external get_is_mc_site_local: [>`ginetaddress] obj -> bool = "ml_g_inet_address_get_is_mc_site_local"
  external get_is_mc_org_local: [>`ginetaddress] obj -> bool = "ml_g_inet_address_get_is_mc_org_local"
  external get_is_mc_node_local: [>`ginetaddress] obj -> bool = "ml_g_inet_address_get_is_mc_node_local"
  external get_is_mc_link_local: [>`ginetaddress] obj -> bool = "ml_g_inet_address_get_is_mc_link_local"
  external get_is_mc_global: [>`ginetaddress] obj -> bool = "ml_g_inet_address_get_is_mc_global"
  external get_is_loopback: [>`ginetaddress] obj -> bool = "ml_g_inet_address_get_is_loopback"
  external get_is_link_local: [>`ginetaddress] obj -> bool = "ml_g_inet_address_get_is_link_local"
  external get_is_any: [>`ginetaddress] obj -> bool = "ml_g_inet_address_get_is_any"
end

module IconIface = struct end
module IOStreamPrivate = struct end
module IOStreamClass = struct end
module IOStreamAdapter = struct end

(* Bidirectional stream: paired input/output sub-streams. *)
module IOStream = struct
  external is_closed: [>`giostream] obj -> bool = "ml_g_io_stream_is_closed"
  external has_pending: [>`giostream] obj -> bool = "ml_g_io_stream_has_pending"
  external get_output_stream: [>`giostream] obj -> [<`goutputstream] obj = "ml_g_io_stream_get_output_stream"
  external get_input_stream: [>`giostream] obj -> [<`ginputstream] obj = "ml_g_io_stream_get_input_stream"
  external clear_pending: [>`giostream] obj -> unit = "ml_g_io_stream_clear_pending"
end

module IOSchedulerJob = struct end
module IOModuleClass = struct end

module IOModule = struct
  external unload: [>`giomodule] obj -> unit = "ml_g_io_module_unload"
  external load: [>`giomodule] obj -> unit = "ml_g_io_module_load"
end

(* Extension-point registry; required_type is a raw GType as int. *)
module IOExtensionPoint = struct
  external set_required_type: [>`gioextensionpoint] obj -> int -> unit = "ml_g_io_extension_point_set_required_type"
  external get_required_type: [>`gioextensionpoint] obj -> int = "ml_g_io_extension_point_get_required_type"
  external get_extensions: [>`gioextensionpoint] obj -> [<`glist] obj = "ml_g_io_extension_point_get_extensions"
  external get_extension_by_name: [>`gioextensionpoint] obj -> string -> [<`gioextension] obj = "ml_g_io_extension_point_get_extension_by_name"
end

module IOExtension = struct
  external ref_class: [>`gioextension] obj -> [<`gtypeclass] obj = "ml_g_io_extension_ref_class"
  external get_priority: [>`gioextension] obj -> int = "ml_g_io_extension_get_priority"
  external get_name: [>`gioextension] obj -> string = "ml_g_io_extension_get_name"
end

module FilterOutputStreamClass = struct end

module FilterOutputStream = struct
  external set_close_base_stream: [>`gfilteroutputstream] obj -> bool -> unit = "ml_g_filter_output_stream_set_close_base_stream"
  external get_close_base_stream: [>`gfilteroutputstream] obj -> bool = "ml_g_filter_output_stream_get_close_base_stream"
  external get_base_stream: [>`gfilteroutputstream] obj -> [<`goutputstream] obj = "ml_g_filter_output_stream_get_base_stream"
end

module FilterInputStreamClass = struct end

module FilterInputStream = struct
  external set_close_base_stream: [>`gfilterinputstream] obj -> bool -> unit = "ml_g_filter_input_stream_set_close_base_stream"
  external get_close_base_stream: [>`gfilterinputstream] obj -> bool = "ml_g_filter_input_stream_get_close_base_stream"
  external get_base_stream: [>`gfilterinputstream] obj -> [<`ginputstream] obj = "ml_g_filter_input_stream_get_base_stream"
end

module FilenameCompleterClass = struct end

module FilenameCompleter = struct
  external set_dirs_only: [>`gfilenamecompleter] obj -> bool -> unit = "ml_g_filename_completer_set_dirs_only"
  external get_completion_suffix: [>`gfilenamecompleter] obj -> string -> string = "ml_g_filename_completer_get_completion_suffix"
end

module FileOutputStreamPrivate = struct end
module FileOutputStreamClass = struct end

module FileOutputStream = struct
  external get_etag: [>`gfileoutputstream] obj -> string = "ml_g_file_output_stream_get_etag"
end

module FileMonitorPrivate = struct end
module FileMonitorClass = struct end

module FileMonitor = struct
  external set_rate_limit: [>`gfilemonitor] obj -> int -> unit = "ml_g_file_monitor_set_rate_limit"
  external is_cancelled: [>`gfilemonitor] obj -> bool = "ml_g_file_monitor_is_cancelled"
  external cancel: [>`gfilemonitor] obj -> bool = "ml_g_file_monitor_cancel"
end

module FileInputStreamPrivate = struct end
module FileInputStreamClass = struct end
module FileInputStream = struct end
module FileInfoClass = struct end

(* File metadata record.  Typed attribute accessors are keyed by attribute
   name string; set_/get_modification_time pass a gtimeval out-parameter. *)
module FileInfo = struct
  external unset_attribute_mask: [>`gfileinfo] obj -> unit = "ml_g_file_info_unset_attribute_mask"
  external set_symlink_target: [>`gfileinfo] obj -> string -> unit = "ml_g_file_info_set_symlink_target"
  external set_sort_order: [>`gfileinfo] obj -> int32 -> unit = "ml_g_file_info_set_sort_order"
  external set_name: [>`gfileinfo] obj -> string -> unit = "ml_g_file_info_set_name"
  external set_modification_time: [>`gfileinfo] obj -> [>`gtimeval] obj -> unit = "ml_g_file_info_set_modification_time"
  external set_is_symlink: [>`gfileinfo] obj -> bool -> unit = "ml_g_file_info_set_is_symlink"
  external set_is_hidden: [>`gfileinfo] obj -> bool -> unit = "ml_g_file_info_set_is_hidden"
  external set_edit_name: [>`gfileinfo] obj -> string -> unit = "ml_g_file_info_set_edit_name"
  external set_display_name: [>`gfileinfo] obj -> string -> unit = "ml_g_file_info_set_display_name"
  external set_content_type: [>`gfileinfo] obj -> string -> unit = "ml_g_file_info_set_content_type"
  external set_attribute_uint64: [>`gfileinfo] obj -> string -> int64 -> unit = "ml_g_file_info_set_attribute_uint64"
  external set_attribute_uint32: [>`gfileinfo] obj -> string -> int32 -> unit = "ml_g_file_info_set_attribute_uint32"
  external set_attribute_string: [>`gfileinfo] obj -> string -> string -> unit = "ml_g_file_info_set_attribute_string"
  external set_attribute_mask: [>`gfileinfo] obj -> [>`gfileattributematcher] obj -> unit = "ml_g_file_info_set_attribute_mask"
  external set_attribute_int64: [>`gfileinfo] obj -> string -> int64 -> unit = "ml_g_file_info_set_attribute_int64"
  external set_attribute_int32: [>`gfileinfo] obj -> string -> int32 -> unit = "ml_g_file_info_set_attribute_int32"
  external set_attribute_byte_string: [>`gfileinfo] obj -> string -> string -> unit = "ml_g_file_info_set_attribute_byte_string"
  external set_attribute_boolean: [>`gfileinfo] obj -> string -> bool -> unit = "ml_g_file_info_set_attribute_boolean"
  external remove_attribute: [>`gfileinfo] obj -> string -> unit = "ml_g_file_info_remove_attribute"
  external has_namespace: [>`gfileinfo] obj -> string -> bool = "ml_g_file_info_has_namespace"
  external has_attribute: [>`gfileinfo] obj -> string -> bool = "ml_g_file_info_has_attribute"
  external get_symlink_target: [>`gfileinfo] obj -> string = "ml_g_file_info_get_symlink_target"
  external get_sort_order: [>`gfileinfo] obj -> int32 = "ml_g_file_info_get_sort_order"
  external get_name: [>`gfileinfo] obj -> string = "ml_g_file_info_get_name"
  external get_modification_time: [>`gfileinfo] obj -> [>`gtimeval] obj -> unit = "ml_g_file_info_get_modification_time"
  external get_is_symlink: [>`gfileinfo] obj -> bool = "ml_g_file_info_get_is_symlink"
  external get_is_hidden: [>`gfileinfo] obj -> bool = "ml_g_file_info_get_is_hidden"
  external get_is_backup: [>`gfileinfo] obj -> bool = "ml_g_file_info_get_is_backup"
  external get_etag: [>`gfileinfo] obj -> string = "ml_g_file_info_get_etag"
  external get_edit_name: [>`gfileinfo] obj -> string = "ml_g_file_info_get_edit_name"
  external get_display_name: [>`gfileinfo] obj -> string = "ml_g_file_info_get_display_name"
  external get_content_type: [>`gfileinfo] obj -> string = "ml_g_file_info_get_content_type"
  external get_attribute_uint64: [>`gfileinfo] obj -> string -> int64 = "ml_g_file_info_get_attribute_uint64"
  external get_attribute_uint32: [>`gfileinfo] obj -> string -> int32 = "ml_g_file_info_get_attribute_uint32"
  external get_attribute_string: [>`gfileinfo] obj -> string -> string = "ml_g_file_info_get_attribute_string"
  external get_attribute_int64: [>`gfileinfo] obj -> string -> int64 = "ml_g_file_info_get_attribute_int64"
  external get_attribute_int32: [>`gfileinfo] obj -> string -> int32 = "ml_g_file_info_get_attribute_int32"
  external get_attribute_byte_string: [>`gfileinfo] obj -> string -> string = "ml_g_file_info_get_attribute_byte_string"
  external get_attribute_boolean: [>`gfileinfo] obj -> string -> bool = "ml_g_file_info_get_attribute_boolean"
  external get_attribute_as_string: [>`gfileinfo] obj -> string -> string = "ml_g_file_info_get_attribute_as_string"
  external dup: [>`gfileinfo] obj -> [<`gfileinfo] obj = "ml_g_file_info_dup"
  external copy_into: [>`gfileinfo] obj -> [>`gfileinfo] obj -> unit = "ml_g_file_info_copy_into"
  external clear_status: [>`gfileinfo] obj -> unit = "ml_g_file_info_clear_status"
end

module FileIface = struct end
module FileIconClass = struct end
module FileIcon = struct end
module FileIOStreamPrivate = struct end
module FileIOStreamClass = struct end

module FileIOStream = struct
  external get_etag: [>`gfileiostream] obj -> string = "ml_g_file_io_stream_get_etag"
end

module FileEnumeratorPrivate = struct end
module FileEnumeratorClass = struct end

module FileEnumerator = struct
  external set_pending: [>`gfileenumerator] obj -> bool -> unit = "ml_g_file_enumerator_set_pending"
  external is_closed: [>`gfileenumerator] obj -> bool = "ml_g_file_enumerator_is_closed"
  external has_pending: [>`gfileenumerator] obj -> bool = "ml_g_file_enumerator_has_pending"
end

module FileDescriptorBasedIface = struct end

(* Manually ref-counted boxed type (ref/unref pair, not GObject-managed). *)
module FileAttributeMatcher = struct
  external unref: [>`gfileattributematcher] obj -> unit = "ml_g_file_attribute_matcher_unref"
  external ref: [>`gfileattributematcher] obj -> [<`gfileattributematcher] obj = "ml_g_file_attribute_matcher_ref"
  external matches_only: [>`gfileattributematcher] obj -> string -> bool = "ml_g_file_attribute_matcher_matches_only"
  external matches: [>`gfileattributematcher] obj -> string -> bool = "ml_g_file_attribute_matcher_matches"
  external enumerate_next: [>`gfileattributematcher] obj -> string = "ml_g_file_attribute_matcher_enumerate_next"
  external enumerate_namespace: [>`gfileattributematcher] obj -> string -> bool = "ml_g_file_attribute_matcher_enumerate_namespace"
end

module FileAttributeInfoList = struct
  external unref: [>`gfileattributeinfolist] obj -> unit = "ml_g_file_attribute_info_list_unref"
  external ref: [>`gfileattributeinfolist] obj -> [<`gfileattributeinfolist] obj = "ml_g_file_attribute_info_list_ref"
  external lookup: [>`gfileattributeinfolist] obj -> string -> [<`gfileattributeinfo] obj = "ml_g_file_attribute_info_list_lookup"
  external dup: [>`gfileattributeinfolist] obj -> [<`gfileattributeinfolist] obj = "ml_g_file_attribute_info_list_dup"
end

module FileAttributeInfo = struct end
module EmblemedIconPrivate = struct end
module EmblemedIconClass = struct end

module EmblemedIcon = struct
  external get_emblems: [>`gemblemedicon] obj -> [<`glist] obj = "ml_g_emblemed_icon_get_emblems"
  external clear_emblems: [>`gemblemedicon] obj -> unit = "ml_g_emblemed_icon_clear_emblems"
  external add_emblem: [>`gemblemedicon] obj -> [>`gemblem] obj -> unit = "ml_g_emblemed_icon_add_emblem"
end

module EmblemClass = struct end
module Emblem = struct end
module DriveIface = struct end
module DesktopAppInfoLookupIface = struct end
module DesktopAppInfoLaunchHandlerIface = struct end
module DesktopAppInfoClass = struct end

module DesktopAppInfo = struct
  external get_is_hidden: [>`gdesktopappinfo] obj -> bool = "ml_g_desktop_app_info_get_is_hidden"
  external get_filename: [>`gdesktopappinfo] obj -> string = "ml_g_desktop_app_info_get_filename"
  (* Global setter: operates on process-wide state, not a specific object. *)
  external set_desktop_env: string -> unit = "ml_g_desktop_app_info_set_desktop_env"
end

module DataOutputStreamPrivate = struct end
module DataOutputStreamClass = struct end
module DataOutputStream = struct end
module DataInputStreamPrivate = struct end
module DataInputStreamClass = struct end
module DataInputStream = struct end
module DBusSubtreeVTable = struct end

module DBusSignalInfo = struct
  external unref: [>`gdbussignalinfo] obj -> unit = "ml_g_dbus_signal_info_unref"
  external ref: [>`gdbussignalinfo] obj -> [<`gdbussignalinfo] obj = "ml_g_dbus_signal_info_ref"
end

module DBusServer = struct
  external stop: [>`gdbusserver] obj -> unit = "ml_g_dbus_server_stop"
  external start: [>`gdbusserver] obj -> unit = "ml_g_dbus_server_start"
  external is_active: [>`gdbusserver] obj -> bool = "ml_g_dbus_server_is_active"
  external get_guid: [>`gdbusserver] obj -> string = "ml_g_dbus_server_get_guid"
  external get_client_address: [>`gdbusserver] obj -> string = "ml_g_dbus_server_get_client_address"
end

module DBusProxyPrivate = struct end
module DBusProxyClass = struct end

(* Client-side proxy for a remote D-Bus object. *)
module DBusProxy = struct
  external set_interface_info: [>`gdbusproxy] obj -> [>`gdbusinterfaceinfo] obj -> unit = "ml_g_dbus_proxy_set_interface_info"
  external set_default_timeout: [>`gdbusproxy] obj -> int -> unit = "ml_g_dbus_proxy_set_default_timeout"
  external set_cached_property: [>`gdbusproxy] obj -> string -> [>`gvariant] obj -> unit = "ml_g_dbus_proxy_set_cached_property"
  external get_object_path: [>`gdbusproxy] obj -> string = "ml_g_dbus_proxy_get_object_path"
  external get_name_owner: [>`gdbusproxy] obj -> string = "ml_g_dbus_proxy_get_name_owner"
  external get_name: [>`gdbusproxy] obj -> string = "ml_g_dbus_proxy_get_name"
  external get_interface_name: [>`gdbusproxy] obj -> string = "ml_g_dbus_proxy_get_interface_name"
  external get_interface_info: [>`gdbusproxy] obj -> [<`gdbusinterfaceinfo] obj = "ml_g_dbus_proxy_get_interface_info"
  external get_default_timeout: [>`gdbusproxy] obj -> int = "ml_g_dbus_proxy_get_default_timeout"
  external get_connection: [>`gdbusproxy] obj -> [<`gdbusconnection] obj = "ml_g_dbus_proxy_get_connection"
  external get_cached_property: [>`gdbusproxy] obj -> string -> [<`gvariant] obj = "ml_g_dbus_proxy_get_cached_property"
end

module DBusPropertyInfo = struct
  external unref: [>`gdbuspropertyinfo] obj -> unit = "ml_g_dbus_property_info_unref"
  external ref: [>`gdbuspropertyinfo] obj -> [<`gdbuspropertyinfo] obj = "ml_g_dbus_property_info_ref"
end

(* Introspection node; generate_xml appends to a caller-supplied GString. *)
module DBusNodeInfo = struct
  external unref: [>`gdbusnodeinfo] obj -> unit = "ml_g_dbus_node_info_unref"
  external ref: [>`gdbusnodeinfo] obj -> [<`gdbusnodeinfo] obj = "ml_g_dbus_node_info_ref"
  external lookup_interface: [>`gdbusnodeinfo] obj -> string -> [<`gdbusinterfaceinfo] obj = "ml_g_dbus_node_info_lookup_interface"
  external generate_xml: [>`gdbusnodeinfo] obj -> int -> [>`gstring] obj -> unit = "ml_g_dbus_node_info_generate_xml"
end

(* Server-side handle for an in-flight method call: reply or raise error. *)
module DBusMethodInvocation = struct
  external return_value: [>`gdbusmethodinvocation] obj -> [>`gvariant] obj -> unit = "ml_g_dbus_method_invocation_return_value"
  external return_gerror: [>`gdbusmethodinvocation] obj -> [>`gerror] obj -> unit = "ml_g_dbus_method_invocation_return_gerror"
  external return_error_literal: [>`gdbusmethodinvocation] obj -> int32 -> int -> string -> unit = "ml_g_dbus_method_invocation_return_error_literal"
  external return_dbus_error: [>`gdbusmethodinvocation] obj -> string -> string -> unit = "ml_g_dbus_method_invocation_return_dbus_error"
  external get_sender: [>`gdbusmethodinvocation] obj -> string = "ml_g_dbus_method_invocation_get_sender"
  external get_parameters: [>`gdbusmethodinvocation] obj -> [<`gvariant] obj = "ml_g_dbus_method_invocation_get_parameters"
  external get_object_path: [>`gdbusmethodinvocation] obj -> string = "ml_g_dbus_method_invocation_get_object_path"
  external get_method_name: [>`gdbusmethodinvocation] obj -> string = "ml_g_dbus_method_invocation_get_method_name"
  external get_method_info: [>`gdbusmethodinvocation] obj -> [<`gdbusmethodinfo] obj = "ml_g_dbus_method_invocation_get_method_info"
  external get_message: [>`gdbusmethodinvocation] obj -> [<`gdbusmessage] obj = "ml_g_dbus_method_invocation_get_message"
  external get_interface_name: [>`gdbusmethodinvocation] obj -> string = "ml_g_dbus_method_invocation_get_interface_name"
  external get_connection: [>`gdbusmethodinvocation] obj -> [<`gdbusconnection] obj = "ml_g_dbus_method_invocation_get_connection"
end

module DBusMethodInfo = struct
  external unref: [>`gdbusmethodinfo] obj -> unit = "ml_g_dbus_method_info_unref"
  external ref: [>`gdbusmethodinfo] obj -> [<`gdbusmethodinfo] obj = "ml_g_dbus_method_info_ref"
end

(* Wire-level D-Bus message: header fields, body, and UNIX fd passing.
   set_unix_fd_list is the only binding here taking an option (nullable). *)
module DBusMessage = struct
  external set_unix_fd_list: [>`gdbusmessage] obj -> [>`gunixfdlist] obj option -> unit = "ml_g_dbus_message_set_unix_fd_list"
  external set_signature: [>`gdbusmessage] obj -> string -> unit = "ml_g_dbus_message_set_signature"
  external set_serial: [>`gdbusmessage] obj -> int32 -> unit = "ml_g_dbus_message_set_serial"
  external set_sender: [>`gdbusmessage] obj -> string -> unit = "ml_g_dbus_message_set_sender"
  external set_reply_serial: [>`gdbusmessage] obj -> int32 -> unit = "ml_g_dbus_message_set_reply_serial"
  external set_path: [>`gdbusmessage] obj -> string -> unit = "ml_g_dbus_message_set_path"
  external set_num_unix_fds: [>`gdbusmessage] obj -> int32 -> unit = "ml_g_dbus_message_set_num_unix_fds"
  external set_member: [>`gdbusmessage] obj -> string -> unit = "ml_g_dbus_message_set_member"
  external set_interface: [>`gdbusmessage] obj -> string -> unit = "ml_g_dbus_message_set_interface"
  external set_error_name: [>`gdbusmessage] obj -> string -> unit = "ml_g_dbus_message_set_error_name"
  external set_destination: [>`gdbusmessage] obj -> string -> unit = "ml_g_dbus_message_set_destination"
  external set_body: [>`gdbusmessage] obj -> [>`gvariant] obj -> unit = "ml_g_dbus_message_set_body"
  external print: [>`gdbusmessage] obj -> int -> string = "ml_g_dbus_message_print"
  external new_method_reply: [>`gdbusmessage] obj -> [<`gdbusmessage] obj = "ml_g_dbus_message_new_method_reply"
  external new_method_error_literal: [>`gdbusmessage] obj -> string -> string -> [<`gdbusmessage] obj = "ml_g_dbus_message_new_method_error_literal"
  external lock: [>`gdbusmessage] obj -> unit = "ml_g_dbus_message_lock"
  external get_unix_fd_list: [>`gdbusmessage] obj -> [<`gunixfdlist] obj = "ml_g_dbus_message_get_unix_fd_list"
  external get_signature: [>`gdbusmessage] obj -> string = "ml_g_dbus_message_get_signature"
  external get_serial: [>`gdbusmessage] obj -> int32 = "ml_g_dbus_message_get_serial"
  external get_sender: [>`gdbusmessage] obj -> string = "ml_g_dbus_message_get_sender"
  external get_reply_serial: [>`gdbusmessage] obj -> int32 = "ml_g_dbus_message_get_reply_serial"
  external get_path: [>`gdbusmessage] obj -> string = "ml_g_dbus_message_get_path"
  external get_num_unix_fds: [>`gdbusmessage] obj -> int32 = "ml_g_dbus_message_get_num_unix_fds"
  external get_member: [>`gdbusmessage] obj -> string = "ml_g_dbus_message_get_member"
  external get_locked: [>`gdbusmessage] obj -> bool = "ml_g_dbus_message_get_locked"
  external get_interface: [>`gdbusmessage] obj -> string = "ml_g_dbus_message_get_interface"
  (* NOTE(review): returns string here although the C g_dbus_message_get_header_fields
     returns an array — the stub presumably marshals it; verify against the stub. *)
  external get_header_fields: [>`gdbusmessage] obj -> string = "ml_g_dbus_message_get_header_fields"
  external get_error_name: [>`gdbusmessage] obj -> string = "ml_g_dbus_message_get_error_name"
  external get_destination: [>`gdbusmessage] obj -> string = "ml_g_dbus_message_get_destination"
  external get_body: [>`gdbusmessage] obj -> [<`gvariant] obj = "ml_g_dbus_message_get_body"
  external get_arg0: [>`gdbusmessage] obj -> string = "ml_g_dbus_message_get_arg0"
end

module DBusInterfaceVTable = struct end

module DBusInterfaceInfo = struct
  external unref: [>`gdbusinterfaceinfo] obj -> unit = "ml_g_dbus_interface_info_unref"
  external ref: [>`gdbusinterfaceinfo] obj -> [<`gdbusinterfaceinfo] obj = "ml_g_dbus_interface_info_ref"
  external lookup_signal: [>`gdbusinterfaceinfo] obj -> string -> [<`gdbussignalinfo] obj = "ml_g_dbus_interface_info_lookup_signal"
  external lookup_property: [>`gdbusinterfaceinfo] obj -> string -> [<`gdbuspropertyinfo] obj = "ml_g_dbus_interface_info_lookup_property"
  external lookup_method: [>`gdbusinterfaceinfo] obj -> string -> [<`gdbusmethodinfo] obj = "ml_g_dbus_interface_info_lookup_method"
  external generate_xml: [>`gdbusinterfaceinfo] obj -> int -> [>`gstring] obj -> unit = "ml_g_dbus_interface_info_generate_xml"
end

module DBusErrorEntry = struct end

(* D-Bus connection: registration ids (int) come from register calls bound
   elsewhere; the unregister/unsubscribe/remove_filter calls consume them. *)
module DBusConnection = struct
  external unregister_subtree: [>`gdbusconnection] obj -> int -> bool = "ml_g_dbus_connection_unregister_subtree"
  external unregister_object: [>`gdbusconnection] obj -> int -> bool = "ml_g_dbus_connection_unregister_object"
  external start_message_processing: [>`gdbusconnection] obj -> unit = "ml_g_dbus_connection_start_message_processing"
  external signal_unsubscribe: [>`gdbusconnection] obj -> int -> unit = "ml_g_dbus_connection_signal_unsubscribe"
  external set_exit_on_close: [>`gdbusconnection] obj -> bool -> unit = "ml_g_dbus_connection_set_exit_on_close"
  external remove_filter: [>`gdbusconnection] obj -> int -> unit = "ml_g_dbus_connection_remove_filter"
  external is_closed: [>`gdbusconnection] obj -> bool = "ml_g_dbus_connection_is_closed"
  external get_unique_name: [>`gdbusconnection] obj -> string = "ml_g_dbus_connection_get_unique_name"
  external get_stream: [>`gdbusconnection] obj -> [<`giostream] obj = "ml_g_dbus_connection_get_stream"
  external get_peer_credentials: [>`gdbusconnection] obj -> [<`gcredentials] obj = "ml_g_dbus_connection_get_peer_credentials"
  external get_guid: [>`gdbusconnection] obj -> string = "ml_g_dbus_connection_get_guid"
  external get_exit_on_close: [>`gdbusconnection] obj -> bool = "ml_g_dbus_connection_get_exit_on_close"
end

module DBusAuthObserver = struct
  external authorize_authenticated_peer: [>`gdbusauthobserver] obj -> [>`giostream] obj -> [>`gcredentials] obj -> bool = "ml_g_dbus_auth_observer_authorize_authenticated_peer"
end

module DBusArgInfo = struct
  external unref: [>`gdbusarginfo] obj -> unit = "ml_g_dbus_arg_info_unref"
  external ref: [>`gdbusarginfo] obj -> [<`gdbusarginfo] obj = "ml_g_dbus_arg_info_ref"
end

module DBusAnnotationInfo = struct
  external unref: [>`gdbusannotationinfo] obj -> unit = "ml_g_dbus_annotation_info_unref"
  external ref: [>`gdbusannotationinfo] obj -> [<`gdbusannotationinfo] obj = "ml_g_dbus_annotation_info_ref"
end

module CredentialsClass = struct end

module Credentials = struct
  external to_string: [>`gcredentials] obj -> string = "ml_g_credentials_to_string"
end

module ConverterOutputStreamPrivate = struct end
module ConverterOutputStreamClass = struct end
module ConverterOutputStream = struct end
module ConverterInputStreamPrivate = struct end
module ConverterInputStreamClass = struct end
module ConverterInputStream = struct end
(* The next module's name continues on the following source line. *)
module
ConverterIface = struct end module CharsetConverterClass = struct end module CharsetConverter = struct external set_use_fallback: [>`gcharsetconverter] obj -> bool -> unit = "ml_g_charset_converter_set_use_fallback" external get_use_fallback: [>`gcharsetconverter] obj -> bool = "ml_g_charset_converter_get_use_fallback" external get_num_fallbacks: [>`gcharsetconverter] obj -> int = "ml_g_charset_converter_get_num_fallbacks" end module CancellablePrivate = struct end module CancellableClass = struct end module Cancellable = struct external source_new: [>`gcancellable] obj -> [<`gsource] obj = "ml_g_cancellable_source_new" external reset: [>`gcancellable] obj -> unit = "ml_g_cancellable_reset" external release_fd: [>`gcancellable] obj -> unit = "ml_g_cancellable_release_fd" external push_current: [>`gcancellable] obj -> unit = "ml_g_cancellable_push_current" external pop_current: [>`gcancellable] obj -> unit = "ml_g_cancellable_pop_current" external make_pollfd: [>`gcancellable] obj -> [>`gpollfd] obj -> bool = "ml_g_cancellable_make_pollfd" external is_cancelled: [>`gcancellable] obj -> bool = "ml_g_cancellable_is_cancelled" external get_fd: [>`gcancellable] obj -> int = "ml_g_cancellable_get_fd" external disconnect: [>`gcancellable] obj -> float -> unit = "ml_g_cancellable_disconnect" external cancel: [>`gcancellable] obj -> unit = "ml_g_cancellable_cancel" external get_current: unit -> [<`gcancellable] obj = "ml_g_cancellable_get_current" end module BufferedOutputStreamPrivate = struct end module BufferedOutputStreamClass = struct end module BufferedOutputStream = struct external set_buffer_size: [>`gbufferedoutputstream] obj -> int -> unit = "ml_g_buffered_output_stream_set_buffer_size" external set_auto_grow: [>`gbufferedoutputstream] obj -> bool -> unit = "ml_g_buffered_output_stream_set_auto_grow" external get_buffer_size: [>`gbufferedoutputstream] obj -> int = "ml_g_buffered_output_stream_get_buffer_size" external get_auto_grow: [>`gbufferedoutputstream] obj 
-> bool = "ml_g_buffered_output_stream_get_auto_grow" end module BufferedInputStreamPrivate = struct end module BufferedInputStreamClass = struct end module BufferedInputStream = struct external set_buffer_size: [>`gbufferedinputstream] obj -> int -> unit = "ml_g_buffered_input_stream_set_buffer_size" external get_buffer_size: [>`gbufferedinputstream] obj -> int = "ml_g_buffered_input_stream_get_buffer_size" external get_available: [>`gbufferedinputstream] obj -> int = "ml_g_buffered_input_stream_get_available" end module AsyncResultIface = struct end module AsyncInitableIface = struct end module ApplicationPrivate = struct end module ApplicationCommandLinePrivate = struct end module ApplicationCommandLineClass = struct end module ApplicationCommandLine = struct external set_exit_status: [>`gapplicationcommandline] obj -> int -> unit = "ml_g_application_command_line_set_exit_status" external getenv: [>`gapplicationcommandline] obj -> string -> string = "ml_g_application_command_line_getenv" external get_platform_data: [>`gapplicationcommandline] obj -> [<`gvariant] obj = "ml_g_application_command_line_get_platform_data" external get_is_remote: [>`gapplicationcommandline] obj -> bool = "ml_g_application_command_line_get_is_remote" external get_exit_status: [>`gapplicationcommandline] obj -> int = "ml_g_application_command_line_get_exit_status" external get_cwd: [>`gapplicationcommandline] obj -> string = "ml_g_application_command_line_get_cwd" end module ApplicationClass = struct end module Application = struct external set_inactivity_timeout: [>`gapplication] obj -> int -> unit = "ml_g_application_set_inactivity_timeout" external set_application_id: [>`gapplication] obj -> string -> unit = "ml_g_application_set_application_id" external release: [>`gapplication] obj -> unit = "ml_g_application_release" external hold: [>`gapplication] obj -> unit = "ml_g_application_hold" external get_is_remote: [>`gapplication] obj -> bool = "ml_g_application_get_is_remote" external 
get_is_registered: [>`gapplication] obj -> bool = "ml_g_application_get_is_registered" external get_inactivity_timeout: [>`gapplication] obj -> int = "ml_g_application_get_inactivity_timeout" external get_application_id: [>`gapplication] obj -> string = "ml_g_application_get_application_id" external activate: [>`gapplication] obj -> unit = "ml_g_application_activate" external id_is_valid: string -> bool = "ml_g_application_id_is_valid" end module AppLaunchContextPrivate = struct end module AppLaunchContextClass = struct end module AppLaunchContext = struct external launch_failed: [>`gapplaunchcontext] obj -> string -> unit = "ml_g_app_launch_context_launch_failed" end module AppInfoIface = struct end module ActionInterface = struct end module ActionGroupInterface = struct end (* Global functions *) external unix_mounts_changed_since: int64 -> bool = "ml_g_unix_mounts_changed_since" external unix_mount_points_changed_since: int64 -> bool = "ml_g_unix_mount_points_changed_since" external unix_mount_is_system_internal: [>`gunixmountentry] obj -> bool = "ml_g_unix_mount_is_system_internal" external unix_mount_is_readonly: [>`gunixmountentry] obj -> bool = "ml_g_unix_mount_is_readonly" external unix_mount_guess_should_display: [>`gunixmountentry] obj -> bool = "ml_g_unix_mount_guess_should_display" external unix_mount_guess_name: [>`gunixmountentry] obj -> string = "ml_g_unix_mount_guess_name" external unix_mount_guess_can_eject: [>`gunixmountentry] obj -> bool = "ml_g_unix_mount_guess_can_eject" external unix_mount_get_mount_path: [>`gunixmountentry] obj -> string = "ml_g_unix_mount_get_mount_path" external unix_mount_get_fs_type: [>`gunixmountentry] obj -> string = "ml_g_unix_mount_get_fs_type" external unix_mount_get_device_path: [>`gunixmountentry] obj -> string = "ml_g_unix_mount_get_device_path" external unix_mount_free: [>`gunixmountentry] obj -> unit = "ml_g_unix_mount_free" external unix_mount_compare: [>`gunixmountentry] obj -> [>`gunixmountentry] obj -> int = 
"ml_g_unix_mount_compare" external unix_is_mount_path_system_internal: string -> bool = "ml_g_unix_is_mount_path_system_internal" external tls_error_quark: unit -> int32 = "ml_g_tls_error_quark" external srv_target_list_sort: [>`glist] obj -> [<`glist] obj = "ml_g_srv_target_list_sort" external resolver_error_quark: unit -> int32 = "ml_g_resolver_error_quark" external io_scheduler_cancel_all_jobs: unit -> unit = "ml_g_io_scheduler_cancel_all_jobs" external io_modules_scan_all_in_directory: string -> unit = "ml_g_io_modules_scan_all_in_directory" external io_modules_load_all_in_directory: string -> [<`glist] obj = "ml_g_io_modules_load_all_in_directory" external io_extension_point_register: string -> [<`gioextensionpoint] obj = "ml_g_io_extension_point_register" external io_extension_point_lookup: string -> [<`gioextensionpoint] obj = "ml_g_io_extension_point_lookup" external io_extension_point_implement: string -> int -> string -> int -> [<`gioextension] obj = "ml_g_io_extension_point_implement" external io_extension_get_type: [>`gioextension] obj -> int = "ml_g_io_extension_get_type" external io_error_quark: unit -> int32 = "ml_g_io_error_quark" external dbus_is_unique_name: string -> bool = "ml_g_dbus_is_unique_name" external dbus_is_name: string -> bool = "ml_g_dbus_is_name" external dbus_is_member_name: string -> bool = "ml_g_dbus_is_member_name" external dbus_is_interface_name: string -> bool = "ml_g_dbus_is_interface_name" external dbus_is_guid: string -> bool = "ml_g_dbus_is_guid" external dbus_is_address: string -> bool = "ml_g_dbus_is_address" external dbus_generate_guid: unit -> string = "ml_g_dbus_generate_guid" external dbus_error_unregister_error: int32 -> int -> string -> bool = "ml_g_dbus_error_unregister_error" external dbus_error_strip_remote_error: [>`gerror] obj -> bool = "ml_g_dbus_error_strip_remote_error" external dbus_error_register_error: int32 -> int -> string -> bool = "ml_g_dbus_error_register_error" external dbus_error_quark: unit -> 
int32 = "ml_g_dbus_error_quark" external dbus_error_new_for_dbus_error: string -> string -> [<`gerror] obj = "ml_g_dbus_error_new_for_dbus_error" external dbus_error_is_remote_error: [>`gerror] obj -> bool = "ml_g_dbus_error_is_remote_error" external dbus_error_get_remote_error: [>`gerror] obj -> string = "ml_g_dbus_error_get_remote_error" external dbus_error_encode_gerror: [>`gerror] obj -> string = "ml_g_dbus_error_encode_gerror" external content_types_get_registered: unit -> [<`glist] obj = "ml_g_content_types_get_registered" external content_type_is_unknown: string -> bool = "ml_g_content_type_is_unknown" external content_type_is_a: string -> string -> bool = "ml_g_content_type_is_a" external content_type_get_mime_type: string -> string = "ml_g_content_type_get_mime_type" external content_type_get_description: string -> string = "ml_g_content_type_get_description" external content_type_from_mime_type: string -> string = "ml_g_content_type_from_mime_type" external content_type_equals: string -> string -> bool = "ml_g_content_type_equals" external content_type_can_be_executable: string -> bool = "ml_g_content_type_can_be_executable" external bus_unwatch_name: int -> unit = "ml_g_bus_unwatch_name" external bus_unown_name: int -> unit = "ml_g_bus_unown_name" external app_info_reset_type_associations: string -> unit = "ml_g_app_info_reset_type_associations" external app_info_get_recommended_for_type: string -> [<`glist] obj = "ml_g_app_info_get_recommended_for_type" external app_info_get_fallback_for_type: string -> [<`glist] obj = "ml_g_app_info_get_fallback_for_type" external app_info_get_all_for_type: string -> [<`glist] obj = "ml_g_app_info_get_all_for_type" external app_info_get_all: unit -> [<`glist] obj = "ml_g_app_info_get_all" (* End of global functions *)
bonsai_web_ui_partial_render_table_bench.ml
open! Core
open! Bonsai
open! Bonsai_web
open Incr_map_collate
open Bonsai_web_ui_partial_render_table
open Bonsai_bench
open Bonsai.Let_syntax
module Table = Expert

(* Focus actions that a benchmark scenario can inject into the table. *)
module Action = struct
  type 'a t =
    | Unfocus
    | Focus_up
    | Focus_down
    | Page_up
    | Page_down
    | Focus of 'a
  [@@deriving sexp, equal]
end

(* All of the mutable inputs feeding the table component, each wrapped in a
   [Bonsai_bench.Var.t] so scenarios can change them between stabilizations. *)
module Input = struct
  type ('key, 'data, 'cmp) t =
    { filter : (key:'key -> data:'data -> bool) option Var.t
    ; order : ('key, 'data, 'cmp) Incr_map_collate.Compare.t Var.t
    ; rank_range : int Collate.Which_range.t Var.t
    ; key_range : 'key Collate.Which_range.t Var.t
    ; map : ('key, 'data, 'cmp) Map.t Var.t
    ; on_change : ('key option -> unit Effect.t) Var.t
    }

  (* Build an input bundle for [map]; every other input gets a benign default
     (no filter, unchanged order, first 100 ranks, all keys, ignore focus
     changes). *)
  let create
        ?(filter = None)
        ?(order = Compare.Unchanged)
        ?(rank_range = Collate.Which_range.To 100)
        ?(key_range = Collate.Which_range.All_rows)
        ?(on_change = Fn.const Effect.Ignore)
        map
    =
    { filter = Var.create filter
    ; order = Var.create order
    ; rank_range = Var.create rank_range
    ; key_range = Var.create key_range
    ; map = Var.create map
    ; on_change = Var.create on_change
    }
  ;;

  (* One-shot interactions that set a single input var. *)
  let apply_filter t filter = Interaction.change_input t.filter (Some filter)
  let clear_filter t = Interaction.change_input t.filter None
  let set_map t map = Interaction.change_input t.map map
  let set_order t order = Interaction.change_input t.order order
  let set_rank_range t rank_range = Interaction.change_input t.rank_range rank_range
  let set_on_change t on_change = Interaction.change_input t.on_change on_change

  (* Simulate scrolling: slide a [window_size]-row rank window from [start]
     towards [stop] (either direction), stabilizing after each step. *)
  let scroll t ~start ~stop ~window_size =
    let stride = if start > stop then -1 else 1 in
    List.range ~stride start stop
    |> List.map ~f:(fun i ->
      Interaction.change_input
        t.rank_range
        (Collate.Which_range.Between (i, i + window_size - 1)))
    |> Interaction.many_with_stabilizations
  ;;
end

(* Assemble the partial-render-table component under benchmark, wiring each
   [Input] var into the collation pipeline. Row height is fixed at 1px so row
   geometry is trivial. *)
let component_for_bench
      ?preload_rows
      comparator
      ~columns
      { Input.filter; order; rank_range; key_range; map; on_change }
  =
  let filter = Var.value filter in
  let order = Var.value order in
  let rank_range = Var.value rank_range in
  let key_range = Var.value key_range in
  let map = Var.value map in
  let on_change = Var.value on_change in
  let%sub collate =
    let collate =
      let%map filter = filter
      and order = order
      and rank_range = rank_range
      and key_range = key_range in
      { Collate.filter; order; key_range; rank_range }
    in
    (* [phys_equal] cutoffs: inputs are Vars, so a changed value is a new
       allocation and physical equality is a sound cheap cutoff. *)
    Table.collate
      ~filter_equal:phys_equal
      ~filter_to_predicate:Fn.id
      ~order_equal:phys_equal
      ~order_to_compare:Fn.id
      map
      collate
  in
  Table.component
    ?preload_rows
    comparator
    ~focus:(Focus.By_row { on_change })
    ~row_height:(`Px 1)
    ~columns
    collate
;;

(* Package a component + interaction script into a [Bonsai_bench.Test],
   mapping benchmark [Action]s onto the table's focus injectors. *)
let create_test ?preload_rows comparator ~initial_vars ~columns ~interaction ~test_name =
  let interaction = interaction initial_vars in
  let component = component_for_bench comparator ?preload_rows ~columns initial_vars in
  let get_inject { Table.Result.focus; _ } = function
    | Action.Unfocus -> focus.Focus.By_row.unfocus
    | Focus_up -> focus.focus_up
    | Focus_down -> focus.focus_down
    | Page_up -> focus.page_up
    | Page_down -> focus.page_down
    | Focus key -> focus.focus key
  in
  Test.create ~name:test_name ~component ~get_inject interaction
;;
example_2.ml
open Ocp_reveal.Html open Ocp_reveal.Slides (* New frame *) let _ = frame { default with title = title5 "Make your presentation with OCaml"; content = {| You can write your presentation in OCaml thant to `ocp-reveal`: ```ocaml let my_slide = frame { defaut with title = title3 "Some title"; content = "Some dummy content"; } ```|} } (* New frame *) let _ = frame { default with title = title5 "Settings"; content = {| All the fields are optional, so you can edit and set the properties that you want to change : ```ocaml type slide = { title : Omd.element; content : string; transition : transition; video: path option; text_color : color; background_color : color; background_img : path option; background_video : path option; background_embed : path option; } ```|} } (* New frame *) let _ = frame { default with title = title3 "Some video inside the slide !"; video = Some "http://clips.vorwaerts-gmbh.de/big_buck_bunny.mp4"; background_color = Some White; } (* New frame *) let _ = frame { default with title = title3 "Slide 2.1"; content = {| Hey ! **How** are *you* 1. first 2. second 3. third |}; background_color = Some Black; } (* New frame *) let _ = frame { slide with background_video = Some "https://s3.amazonaws.com/static.slid.es/site/homepage/v1/homepage-video-editor.mp4"; } let () = auto_make_config "Demo ocp-reveal - auto"
bzlaslvfun.c
/*
 * Bitwuzla fun-solver engine: lemmas-on-demand for uninterpreted
 * functions/lambdas layered on top of the bit-vector SAT core.
 */

#include "bzlaslvfun.h"

#include "bzlabeta.h"
#include "bzlaclone.h"
#include "bzlacore.h"
#include "bzladbg.h"
#include "bzladcr.h"
#include "bzlaexp.h"
#include "bzlalog.h"
#include "bzlalsutils.h"
#include "bzlamodel.h"
#include "bzlaopt.h"
#include "bzlaprintmodel.h"
#include "bzlaslvprop.h"
#include "bzlaslvsls.h"
#include "preprocess/bzlapreprocess.h"
#include "utils/bzlaabort.h"
#include "utils/bzlahash.h"
#include "utils/bzlahashint.h"
#include "utils/bzlahashptr.h"
#include "utils/bzlanodeiter.h"
#include "utils/bzlastack.h"
#include "utils/bzlaunionfind.h"
#include "utils/bzlautil.h"

/*------------------------------------------------------------------------*/

/* Deep-copy fun-solver state 'slv' into Bzla instance 'clone'.
 * Nodes are translated through 'exp_map'; the shallow memcpy is then
 * patched field by field for everything that owns memory. */
static BzlaFunSolver *
clone_fun_solver(Bzla *clone, BzlaFunSolver *slv, BzlaNodeMap *exp_map)
{
  assert(clone);
  assert(slv);
  assert(slv->kind == BZLA_FUN_SOLVER_KIND);
  assert(exp_map);

  uint32_t h;
  Bzla *bzla;
  BzlaFunSolver *res;

  bzla = slv->bzla;

  BZLA_NEW(clone->mm, res);
  /* shallow copy first; owned members are re-cloned below */
  memcpy(res, slv, sizeof(BzlaFunSolver));

  res->bzla   = clone;
  res->lemmas = bzla_hashptr_table_clone(
      clone->mm, slv->lemmas, bzla_clone_key_as_node, 0, exp_map, 0);

  bzla_clone_node_ptr_stack(
      clone->mm, &slv->cur_lemmas, &res->cur_lemmas, exp_map, false);

  bzla_clone_node_ptr_stack(
      clone->mm, &slv->constraints, &res->constraints, exp_map, false);

  if (slv->score)
  {
    /* the score table's value layout depends on the justification
     * heuristic: MIN_APP stores nested hash tables, MIN_DEP plain ints */
    h = bzla_opt_get(bzla, BZLA_OPT_FUN_JUST_HEURISTIC);
    if (h == BZLA_JUST_HEUR_BRANCH_MIN_APP)
    {
      res->score = bzla_hashptr_table_clone(clone->mm,
                                            slv->score,
                                            bzla_clone_key_as_node,
                                            bzla_clone_data_as_ptr_htable,
                                            exp_map,
                                            exp_map);
    }
    else
    {
      assert(h == BZLA_JUST_HEUR_BRANCH_MIN_DEP);
      res->score = bzla_hashptr_table_clone(clone->mm,
                                            slv->score,
                                            bzla_clone_key_as_node,
                                            bzla_clone_data_as_int,
                                            exp_map,
                                            0);
    }
  }

  /* clone the lemma-size statistics stack, preserving size and count */
  BZLA_INIT_STACK(clone->mm, res->stats.lemmas_size);
  if (BZLA_SIZE_STACK(slv->stats.lemmas_size) > 0)
  {
    BZLA_CNEWN(clone->mm,
               res->stats.lemmas_size.start,
               BZLA_SIZE_STACK(slv->stats.lemmas_size));

    res->stats.lemmas_size.end =
        res->stats.lemmas_size.start + BZLA_SIZE_STACK(slv->stats.lemmas_size);
    res->stats.lemmas_size.top = res->stats.lemmas_size.start
                                 + BZLA_COUNT_STACK(slv->stats.lemmas_size);
    memcpy(res->stats.lemmas_size.start,
           slv->stats.lemmas_size.start,
           BZLA_SIZE_STACK(slv->stats.lemmas_size) * sizeof(uint32_t));
  }

  return res;
}

/* Release all nodes/tables/stacks owned by the fun solver and the solver
 * struct itself; resets bzla->slv to 0. */
static void
delete_fun_solver(BzlaFunSolver *slv)
{
  assert(slv);
  assert(slv->kind == BZLA_FUN_SOLVER_KIND);
  assert(slv->bzla);
  assert(slv->bzla->slv == (BzlaSolver *) slv);

  BzlaPtrHashTable *t;
  BzlaPtrHashTableIterator it, iit;
  BzlaNode *exp;
  Bzla *bzla;

  bzla = slv->bzla;

  bzla_iter_hashptr_init(&it, slv->lemmas);
  while (bzla_iter_hashptr_has_next(&it))
    bzla_node_release(bzla, bzla_iter_hashptr_next(&it));
  bzla_hashptr_table_delete(slv->lemmas);

  if (slv->score)
  {
    bzla_iter_hashptr_init(&it, slv->score);
    while (bzla_iter_hashptr_has_next(&it))
    {
      if (bzla_opt_get(bzla, BZLA_OPT_FUN_JUST_HEURISTIC)
          == BZLA_JUST_HEUR_BRANCH_MIN_APP)
      {
        /* MIN_APP: value is a nested hash table of nodes — release both
         * the key node and every node in the nested table */
        t   = (BzlaPtrHashTable *) it.bucket->data.as_ptr;
        exp = bzla_iter_hashptr_next(&it);
        bzla_node_release(bzla, exp);

        bzla_iter_hashptr_init(&iit, t);
        while (bzla_iter_hashptr_has_next(&iit))
          bzla_node_release(bzla, bzla_iter_hashptr_next(&iit));
        bzla_hashptr_table_delete(t);
      }
      else
      {
        assert(bzla_opt_get(bzla, BZLA_OPT_FUN_JUST_HEURISTIC)
               == BZLA_JUST_HEUR_BRANCH_MIN_DEP);
        bzla_node_release(bzla, bzla_iter_hashptr_next(&it));
      }
    }
    bzla_hashptr_table_delete(slv->score);
  }

  BZLA_RELEASE_STACK(slv->cur_lemmas);

  while (!BZLA_EMPTY_STACK(slv->constraints))
  {
    bzla_node_release(bzla, BZLA_POP_STACK(slv->constraints));
  }
  BZLA_RELEASE_STACK(slv->constraints);

  BZLA_RELEASE_STACK(slv->stats.lemmas_size);
  BZLA_DELETE(bzla->mm, slv);
  bzla->slv = 0;
}

/*------------------------------------------------------------------------*/

/* Return true if the formula requires an incremental SAT solver, i.e. if
 * any function node (or FP operator that introduces a UF) is reachable
 * from the constraints/assumptions/inputs. */
static bool
incremental_required(Bzla *bzla)
{
  bool res = false;
  uint32_t i;
  BzlaNode *cur;
  BzlaPtrHashTableIterator it;
  BzlaNodePtrStack stack;
  BzlaIntHashTable *cache;

  /* If model generation is enabled for all nodes, we don't have to traverse
   * the formula, but check if functions have been created. */
  if (bzla_opt_get(bzla, BZLA_OPT_PRODUCE_MODELS) > 1)
  {
    return bzla->ufs->count > 0 || bzla->lambdas->count > 0;
  }

  BZLA_INIT_STACK(bzla->mm, stack);
  cache = bzla_hashint_table_new(bzla->mm);
  /* seed the DFS with all constraint/assumption roots ... */
  bzla_iter_hashptr_init(&it, bzla->unsynthesized_constraints);
  bzla_iter_hashptr_queue(&it, bzla->synthesized_constraints);
  bzla_iter_hashptr_queue(&it, bzla->assumptions);
  while (bzla_iter_hashptr_has_next(&it))
  {
    cur = bzla_iter_hashptr_next(&it);
    BZLA_PUSH_STACK(stack, cur);
  }
  /* ... and all (simplified) inputs */
  bzla_iter_hashptr_init(&it, bzla->inputs);
  while (bzla_iter_hashptr_has_next(&it))
  {
    cur = bzla_simplify_exp(bzla, bzla_iter_hashptr_next(&it));
    BZLA_PUSH_STACK(stack, cur);
  }

  while (!BZLA_EMPTY_STACK(stack))
  {
    cur = bzla_node_real_addr(BZLA_POP_STACK(stack));

    if (bzla_hashint_table_contains(cache, cur->id)) continue;

    bzla_hashint_table_add(cache, cur->id);
    if (bzla_node_is_fun(cur) || cur->apply_below
        || cur->lambda_below
        // These FP operators introduce uninterpreted functions.
        || bzla_node_is_fp_to_sbv(cur) || bzla_node_is_fp_to_ubv(cur)
        || bzla_node_is_fp_min(cur) || bzla_node_is_fp_max(cur))
    {
      res = true;
      break;
    }

    for (i = 0; i < cur->arity; i++) BZLA_PUSH_STACK(stack, cur->e[i]);
  }

  bzla_hashint_table_delete(cache);
  BZLA_RELEASE_STACK(stack);
  return res;
}

/* Initialize the SAT manager (no-op if already initialized) and downgrade
 * it to non-incremental mode when no functions require incrementality. */
static void
configure_sat_mgr(Bzla *bzla)
{
  BzlaSATMgr *smgr;

  smgr = bzla_get_sat_mgr(bzla);
  if (bzla_sat_is_initialized(smgr)) return;
  bzla_sat_enable_solver(smgr);
  bzla_sat_init(smgr);

  /* reset SAT solver to non-incremental if all functions have been
   * eliminated */
  if (!bzla_opt_get(bzla, BZLA_OPT_INCREMENTAL) && smgr->inc_required
      && !incremental_required(bzla))
  {
    smgr->inc_required = false;
    BZLA_MSG(bzla->msg,
             1,
             "no functions found, resetting SAT solver to non-incremental");

    /* dual prop relies on incremental solving, so disable it too */
    if (bzla_opt_get(bzla, BZLA_OPT_FUN_DUAL_PROP))
    {
      bzla_opt_set(bzla, BZLA_OPT_FUN_DUAL_PROP, 0);
      BZLA_MSG(bzla->msg, 1, "no functions found, disabling --fun:dual-prop");
    }
  }

  BZLA_ABORT(smgr->inc_required && !bzla_sat_mgr_has_incremental_support(smgr),
             "selected SAT solver '%s' does not support incremental mode",
             smgr->name);
}

/* Run the SAT solver (with conflict 'limit', -1 = none) and account the
 * elapsed time to the fun solver's statistics. */
static BzlaSolverResult
timed_sat_sat(Bzla *bzla, int32_t limit)
{
  assert(bzla);
  assert(bzla->slv);
  assert(bzla->slv->kind == BZLA_FUN_SOLVER_KIND);

  double start, delta;
  BzlaSolverResult res;
  BzlaSATMgr *smgr;
  BzlaAIGMgr *amgr;

  amgr = bzla_get_aig_mgr(bzla);
  BZLA_MSG(bzla->msg,
           1,
           "%u AIG vars, %u AIG ands, %u CNF vars, %u CNF clauses",
           amgr->cur_num_aig_vars,
           amgr->cur_num_aigs,
           amgr->num_cnf_vars,
           amgr->num_cnf_clauses);
  smgr  = bzla_get_sat_mgr(bzla);
  start = bzla_util_time_stamp();
  res   = bzla_sat_check_sat(smgr, limit);
  delta = bzla_util_time_stamp() - start;
  BZLA_FUN_SOLVER(bzla)->time.sat += delta;

  BZLA_MSG(
      bzla->msg, 2, "SAT solver returns %d after %.1f seconds", res, delta);

  return res;
}

/* True if 'exp' already has a bit-vector assignment available (in the BV
 * model, or derivable because the node is synthesized / a BV constant). */
static bool
has_bv_assignment(Bzla *bzla, BzlaNode *exp)
{
  exp = bzla_node_real_addr(exp);
  return (bzla->bv_model && bzla_hashint_map_contains(bzla->bv_model, exp->id))
         ||
         bzla_node_is_synth(exp) || bzla_node_is_bv_const(exp);
}

/* Return a fresh copy of the bit-vector assignment of 'exp' (caller frees).
 * Missing assignments are computed, cached in bzla->bv_model, and the
 * result is complemented if 'exp' is an inverted node. */
static BzlaBitVector *
get_bv_assignment(Bzla *bzla, BzlaNode *exp)
{
  assert(bzla->bv_model);
  assert(!bzla_node_real_addr(exp)->parameterized);

  BzlaNode *real_exp;
  BzlaBitVector *bv, *result;
  BzlaHashTableData *d;

  exp = bzla_node_get_simplified(bzla, exp);

  real_exp = bzla_node_real_addr(exp);
  if ((d = bzla_hashint_map_get(bzla->bv_model, real_exp->id)))
    bv = bzla_bv_copy(bzla->mm, d->as_ptr);
  else /* cache assignment to avoid querying the sat solver multiple times */
  {
    /* synthesized nodes are always encoded and have an assignment */
    if (bzla_node_is_synth(real_exp))
      bv = bzla_model_get_bv_assignment(bzla, real_exp);
    else if (bzla_node_is_bv_const(real_exp))
      bv = bzla_bv_copy(bzla->mm, bzla_node_bv_const_get_bits(real_exp));
    /* initialize var, apply, and feq nodes if they are not yet synthesized
     * and encoded (not in the BV skeleton yet, and thus unconstrained). */
    else if (bzla_node_is_bv_var(real_exp) || bzla_node_is_apply(real_exp)
             || bzla_node_is_fun_eq(real_exp))
    {
      if (!bzla_node_is_synth(real_exp))
        BZLALOG(1, "zero-initialize: %s", bzla_util_node2string(real_exp));
      bv = bzla_model_get_bv_assignment(bzla, real_exp);
    }
    else
      bv = bzla_eval_exp(bzla, real_exp);
    bzla_model_add_to_bv(bzla, bzla->bv_model, real_exp, bv);
  }

  /* honor the sign bit of the (possibly inverted) node pointer */
  if (bzla_node_is_inverted(exp))
  {
    result = bzla_bv_not(bzla->mm, bv);
    bzla_bv_free(bzla->mm, bv);
  }
  else
    result = bv;

  return result;
}

/*------------------------------------------------------------------------*/

/* Create an expression-layer clone of 'bzla' for dual propagation.
 * All constraints and assumptions are conjoined into a single '*root'
 * (and removed from the clone's tables), the clone is forced into
 * incremental mode with a plain SAT engine, and model generation /
 * dual prop are disabled on it. Returns 0 for an empty formula. */
static Bzla *
new_exp_layer_clone_for_dual_prop(Bzla *bzla,
                                  BzlaNodeMap **exp_map,
                                  BzlaNode **root)
{
  assert(bzla);
  assert(bzla->slv);
  assert(bzla->slv->kind == BZLA_FUN_SOLVER_KIND);
  assert(exp_map);
  assert(root);

  double start;
  Bzla *clone;
  BzlaNode *cur, *and;
  BzlaPtrHashTableIterator it;

  /* empty formula */
  if (bzla->unsynthesized_constraints->count == 0) return 0;

  start = bzla_util_time_stamp();
  clone = bzla_clone_exp_layer(bzla, exp_map, true);
  assert(!clone->synthesized_constraints->count);
  assert(clone->embedded_constraints->count == 0);
  assert(clone->unsynthesized_constraints->count);

  bzla_opt_set(clone, BZLA_OPT_PRODUCE_MODELS, 0);
  bzla_opt_set(clone, BZLA_OPT_INCREMENTAL, 1);
  //  bzla_opt_set (clone, BZLA_OPT_LOGLEVEL, 0);
  //  bzla_opt_set (clone, BZLA_OPT_VERBOSITY, 0);
  bzla_opt_set(clone, BZLA_OPT_FUN_DUAL_PROP, 0);

  assert(!bzla_sat_is_initialized(bzla_get_sat_mgr(clone)));
  bzla_opt_set_str(clone, BZLA_OPT_SAT_ENGINE, "plain=1");
  configure_sat_mgr(clone);

  /* AND all constraints/assumptions into a single root node */
  bzla_iter_hashptr_init(&it, clone->unsynthesized_constraints);
  bzla_iter_hashptr_queue(&it, clone->assumptions);
  while (bzla_iter_hashptr_has_next(&it))
  {
    cur = bzla_iter_hashptr_next(&it);
    bzla_node_real_addr(cur)->constraint = 0;
    if (!*root)
    {
      *root = bzla_node_copy(clone, cur);
    }
    else
    {
      and = bzla_exp_bv_and(clone, *root, cur);
      bzla_node_release(clone, *root);
      *root = and;
    }
  }

  /* release the originals and reset the clone's tables to empty */
  bzla_iter_hashptr_init(&it, clone->unsynthesized_constraints);
  bzla_iter_hashptr_queue(&it, clone->assumptions);
  while (bzla_iter_hashptr_has_next(&it))
    bzla_node_release(clone, bzla_iter_hashptr_next(&it));
  bzla_hashptr_table_delete(clone->unsynthesized_constraints);
  bzla_hashptr_table_delete(clone->assumptions);

  clone->unsynthesized_constraints =
      bzla_hashptr_table_new(clone->mm,
                             (BzlaHashPtr) bzla_node_hash_by_id,
                             (BzlaCmpPtr) bzla_node_compare_by_id);
  clone->assumptions =
      bzla_hashptr_table_new(clone->mm,
                             (BzlaHashPtr) bzla_node_hash_by_id,
                             (BzlaCmpPtr) bzla_node_compare_by_id);
  BZLA_FUN_SOLVER(bzla)->time.search_init_apps_cloning +=
      bzla_util_time_stamp() - start;
  return clone;
}

/* For each input node, assume 'clone_input = current assignment' in the
 * clone. 'key_map' records clone-node -> original-node, 'assumptions'
 * records assumption-eq -> clone-node for later failed-assumption lookup. */
static void
assume_inputs(Bzla *bzla,
              Bzla *clone,
              BzlaNodePtrStack *inputs,
              BzlaNodeMap *exp_map,
              BzlaNodeMap *key_map,
              BzlaNodeMap *assumptions)
{
  assert(bzla);
  assert(clone);
  assert(inputs);
  assert(exp_map);
  assert(key_map);
  assert(key_map->table->count == 0);
  assert(assumptions);

  uint32_t i;
  BzlaNode *cur_bzla, *cur_clone, *bv_const, *bv_eq;
  BzlaBitVector *bv;

  for (i = 0; i < BZLA_COUNT_STACK(*inputs); i++)
  {
    cur_bzla  = BZLA_PEEK_STACK(*inputs, i);
    cur_clone = bzla_nodemap_mapped(exp_map, cur_bzla);
    assert(cur_clone);
    assert(bzla_node_is_regular(cur_clone));
    assert(!bzla_nodemap_mapped(key_map, cur_clone));
    bzla_nodemap_map(key_map, cur_clone, cur_bzla);

    assert(bzla_node_is_regular(cur_bzla));
    bv       = get_bv_assignment(bzla, cur_bzla);
    bv_const = bzla_exp_bv_const(clone, bv);
    bzla_bv_free(bzla->mm, bv);
    bv_eq = bzla_exp_eq(clone, cur_clone, bv_const);
    BZLALOG(1,
            "assume input: %s (%s = %s)",
            bzla_util_node2string(bv_eq),
            bzla_util_node2string(cur_bzla),
            bzla_util_node2string(bv_const));
    bzla_assume_exp(clone, bv_eq);
    bzla_nodemap_map(assumptions, bv_eq, cur_clone);
    bzla_node_release(clone, bv_const);
    bzla_node_release(clone, bv_eq);
  }
}

/* For function equality 'feq' (f = g), build the witness term
 * !(f(a) = g(a)) over fresh variables a named "witness(<id>)_<i>".
 * Returned node is owned by the caller. */
static BzlaNode *
create_function_disequality_witness(Bzla *bzla, BzlaNode *feq)
{
  assert(bzla_node_is_regular(feq));
  assert(bzla_node_is_fun_eq(feq));

  BzlaMemMgr *mm;
  BzlaNode *var, *app0, *app1, *eq, *arg;
  BzlaSortId funsort, sort;
  BzlaNodePtrStack args;
  BzlaTupleSortIterator it;

  mm = bzla->mm;
  BZLA_INIT_STACK(mm, args);
  funsort = bzla_sort_fun_get_domain(bzla, bzla_node_get_sort_id(feq->e[0]));

  /* create fresh variables, one per element of the function domain */
  size_t len = bzla_util_num_digits(feq->id) + strlen("witness()_");
  uint32_t i = 0;
  bzla_iter_tuple_sort_init(&it, bzla, funsort);
  while (bzla_iter_tuple_sort_has_next(&it))
  {
    sort = bzla_iter_tuple_sort_next(&it);
    assert(!bzla_sort_is_fun(bzla, sort));
    size_t buf_len = len + bzla_util_num_digits(i) + 1;
    char buf[buf_len];
    snprintf(buf, buf_len, "witness(%u)_%u", feq->id, i);
    var = bzla_exp_var(bzla, sort, buf);
    BZLA_PUSH_STACK(args, var);
    ++i;
  }

  arg  = bzla_exp_args(bzla, args.start, BZLA_COUNT_STACK(args));
  app0 = bzla_node_create_apply(bzla, feq->e[0], arg);
  app1 = bzla_node_create_apply(bzla, feq->e[1], arg);
  eq   = bzla_exp_eq(bzla, app0, app1);

  bzla_node_release(bzla, arg);
  bzla_node_release(bzla, app0);
  bzla_node_release(bzla, app1);

  while (!BZLA_EMPTY_STACK(args)) bzla_node_release(bzla, BZLA_POP_STACK(args));
  BZLA_RELEASE_STACK(args);

  return bzla_node_invert(eq);
}

/* For every function equality f = g, add a witness for disequality:
 * f != g -> f(a) != g(a) */
static void
add_function_disequality_witnesses(Bzla *bzla)
{
  uint32_t i;
  BzlaNode *cur, *neq, *con;
  BzlaNodePtrStack feqs, visit;
  BzlaPtrHashTableIterator it;
  BzlaPtrHashBucket *b;
  BzlaMemMgr *mm;
  BzlaIntHashTable *cache;

  mm = bzla->mm;
  BZLA_INIT_STACK(mm, visit);
  bzla_iter_hashptr_init(&it, bzla->inputs);
  bzla_iter_hashptr_queue(&it, bzla->unsynthesized_constraints);
  bzla_iter_hashptr_queue(&it, bzla->assumptions);
  /* Note: We don't have to traverse synthesized_constraints as we already
   * created witnesses for them in a previous check-sat call. */
  while (bzla_iter_hashptr_has_next(&it))
  {
    cur = bzla_iter_hashptr_next(&it);
    cur = bzla_node_get_simplified(bzla, cur);
    BZLA_PUSH_STACK(visit, cur);
  }

  /* collect all reachable function equalities */
  cache = bzla_hashint_table_new(mm);
  BZLA_INIT_STACK(mm, feqs);
  while (!BZLA_EMPTY_STACK(visit))
  {
    cur = bzla_node_real_addr(BZLA_POP_STACK(visit));

    if (bzla_hashint_table_contains(cache, cur->id)) continue;

    bzla_hashint_table_add(cache, cur->id);
    if (bzla_node_is_fun_eq(cur) && !cur->parameterized)
    {
      b = bzla_hashptr_table_get(bzla->feqs, cur);
      /* already visited and created inequality constraint in a previous
       * sat call */
      if (b->data.as_int) continue;
      BZLA_PUSH_STACK(feqs, cur);
      /* if the lambdas are not arrays, we cannot handle equalities */
      //      BZLA_ABORT(
      //          (bzla_node_is_lambda(cur->e[0]) &&
      //          !bzla_node_is_array(cur->e[0]))
      //              || (bzla_node_is_lambda(cur->e[1])
      //                  && !bzla_node_is_array(cur->e[1])),
      //          "equality over non-array lambdas not supported yet");
    }
    for (i = 0; i < cur->arity; i++) BZLA_PUSH_STACK(visit, cur->e[i]);
  }

  /* add inequality constraint for every reachable function equality */
  while (!BZLA_EMPTY_STACK(feqs))
  {
    cur = BZLA_POP_STACK(feqs);
    assert(bzla_node_is_fun_eq(cur));
    assert(!cur->parameterized);
    b = bzla_hashptr_table_get(bzla->feqs, cur);
assert(b); assert(b->data.as_int == 0); b->data.as_int = 1; neq = create_function_disequality_witness(bzla, cur); con = bzla_exp_implies(bzla, bzla_node_invert(cur), neq); bzla_assert_exp(bzla, con); bzla_node_release(bzla, con); bzla_node_release(bzla, neq); BZLALOG(2, "add inequality constraint for %s", bzla_util_node2string(cur)); } BZLA_RELEASE_STACK(visit); BZLA_RELEASE_STACK(feqs); bzla_hashint_table_delete(cache); } static int32_t sat_aux_bzla_dual_prop(Bzla *bzla) { assert(bzla); BzlaSolverResult result; if (bzla->inconsistent) goto DONE; BZLA_MSG(bzla->msg, 1, "calling SAT"); configure_sat_mgr(bzla); if (bzla->valid_assignments == 1) bzla_reset_incremental_usage(bzla); bzla_add_again_assumptions(bzla); assert(bzla->synthesized_constraints->count == 0); assert(bzla->unsynthesized_constraints->count == 0); assert(bzla->embedded_constraints->count == 0); assert(bzla_dbg_check_all_hash_tables_proxy_free(bzla)); assert(bzla_dbg_check_all_hash_tables_simp_free(bzla)); assert(bzla_dbg_check_assumptions_simp_free(bzla)); result = timed_sat_sat(bzla, -1); assert(result == BZLA_RESULT_UNSAT || (bzla_terminate(bzla) && result == BZLA_RESULT_UNKNOWN)); DONE: result = BZLA_RESULT_UNSAT; bzla->valid_assignments = 1; bzla->last_sat_result = result; return result; } static void collect_applies(Bzla *bzla, Bzla *clone, BzlaNodeMap *key_map, BzlaNodeMap *assumptions, BzlaIntHashTable *top_applies, BzlaNodePtrStack *top_applies_feq) { assert(bzla); assert(bzla->slv); assert(bzla->slv->kind == BZLA_FUN_SOLVER_KIND); assert(clone); assert(key_map); assert(assumptions); assert(top_applies); assert(top_applies_feq); double start; uint32_t i; BzlaMemMgr *mm; BzlaFunSolver *slv; BzlaNode *cur_bzla, *cur_clone, *bv_eq; BzlaNodePtrStack failed_eqs; BzlaNodeMapIterator it; BzlaIntHashTable *mark; start = bzla_util_time_stamp(); mm = bzla->mm; slv = BZLA_FUN_SOLVER(bzla); mark = bzla_hashint_table_new(mm); BZLA_INIT_STACK(mm, failed_eqs); bzla_iter_nodemap_init(&it, assumptions); 
while (bzla_iter_nodemap_has_next(&it)) { bv_eq = bzla_iter_nodemap_next(&it); cur_clone = bzla_nodemap_mapped(assumptions, bv_eq); assert(cur_clone); /* Note: node mapping is normalized, revert */ if (bzla_node_is_inverted(cur_clone)) { bv_eq = bzla_node_invert(bv_eq); cur_clone = bzla_node_invert(cur_clone); } cur_bzla = bzla_nodemap_mapped(key_map, cur_clone); assert(cur_bzla); assert(bzla_node_is_regular(cur_bzla)); assert(bzla_node_is_bv_var(cur_bzla) || bzla_node_is_apply(cur_bzla) || bzla_node_is_fun_eq(cur_bzla)); if (bzla_node_is_bv_var(cur_bzla)) slv->stats.dp_assumed_vars += 1; else if (bzla_node_is_fun_eq(cur_bzla)) slv->stats.dp_assumed_eqs += 1; else { assert(bzla_node_is_apply(cur_bzla)); slv->stats.dp_assumed_applies += 1; } if (bzla_failed_exp(clone, bv_eq)) { BZLALOG(1, "failed: %s", bzla_util_node2string(cur_bzla)); if (bzla_node_is_bv_var(cur_bzla)) slv->stats.dp_failed_vars += 1; else if (bzla_node_is_fun_eq(cur_bzla)) { slv->stats.dp_failed_eqs += 1; BZLA_PUSH_STACK(failed_eqs, cur_bzla); } else { assert(bzla_node_is_apply(cur_bzla)); if (bzla_hashint_table_contains(mark, cur_bzla->id)) continue; slv->stats.dp_failed_applies += 1; bzla_hashint_table_add(mark, cur_bzla->id); bzla_hashint_table_add(top_applies, cur_bzla->id); } } } bzla_hashint_table_delete(mark); mark = bzla_hashint_table_new(mm); /* collect applies below failed function equalities */ while (!BZLA_EMPTY_STACK(failed_eqs)) { cur_bzla = bzla_node_real_addr(BZLA_POP_STACK(failed_eqs)); if (!cur_bzla->apply_below || bzla_hashint_table_contains(mark, cur_bzla->id)) continue; bzla_hashint_table_add(mark, cur_bzla->id); /* we only need the "top applies" below a failed function equality */ if (!cur_bzla->parameterized && bzla_node_is_apply(cur_bzla)) { BZLALOG(1, "apply below eq: %s", bzla_util_node2string(cur_bzla)); if (!bzla_hashint_table_contains(top_applies, cur_bzla->id)) { BZLA_PUSH_STACK(*top_applies_feq, cur_bzla); bzla_hashint_table_add(top_applies, cur_bzla->id); } continue; 
} for (i = 0; i < cur_bzla->arity; i++) BZLA_PUSH_STACK(failed_eqs, cur_bzla->e[i]); } BZLA_RELEASE_STACK(failed_eqs); bzla_hashint_table_delete(mark); slv->time.search_init_apps_collect_fa += bzla_util_time_stamp() - start; } static void set_up_dual_and_collect(Bzla *bzla, Bzla *clone, BzlaNode *clone_root, BzlaNodeMap *exp_map, BzlaNodePtrStack *inputs, BzlaNodePtrStack *top_applies) { assert(bzla); assert(bzla->slv); assert(bzla->slv->kind == BZLA_FUN_SOLVER_KIND); assert(clone); assert(clone_root); assert(exp_map); assert(inputs); assert(top_applies); double delta; uint32_t i; BzlaNode *cur; BzlaFunSolver *slv; BzlaNodeMap *assumptions, *key_map; BzlaNodePtrStack sorted, topapps_feq; BzlaIntHashTable *topapps; delta = bzla_util_time_stamp(); slv = BZLA_FUN_SOLVER(bzla); assumptions = bzla_nodemap_new(bzla); key_map = bzla_nodemap_new(bzla); BZLA_INIT_STACK(bzla->mm, sorted); BZLA_FIT_STACK(sorted, BZLA_COUNT_STACK(*inputs)); memcpy(sorted.start, inputs->start, sizeof(BzlaNode *) * BZLA_COUNT_STACK(*inputs)); sorted.top = sorted.start + BZLA_COUNT_STACK(*inputs); BZLA_INIT_STACK(bzla->mm, topapps_feq); topapps = bzla_hashint_table_new(bzla->mm); /* assume root */ bzla_assume_exp(clone, bzla_node_invert(clone_root)); /* assume assignments of bv vars and applies, partial assignments are * assumed as partial assignment (as slice on resp. 
var/apply) */ switch (bzla_opt_get(bzla, BZLA_OPT_FUN_DUAL_PROP_QSORT)) { case BZLA_DP_QSORT_ASC: qsort(sorted.start, BZLA_COUNT_STACK(sorted), sizeof(BzlaNode *), bzla_node_compare_by_id_qsort_asc); break; case BZLA_DP_QSORT_DESC: qsort(sorted.start, BZLA_COUNT_STACK(sorted), sizeof(BzlaNode *), bzla_node_compare_by_id_qsort_desc); break; default: assert(bzla_opt_get(bzla, BZLA_OPT_FUN_DUAL_PROP_QSORT) == BZLA_DP_QSORT_JUST); bzla_dcr_compute_scores_dual_prop(bzla); qsort(sorted.start, BZLA_COUNT_STACK(sorted), sizeof(BzlaNode *), bzla_dcr_compare_scores_qsort); } assume_inputs(bzla, clone, &sorted, exp_map, key_map, assumptions); slv->time.search_init_apps_collect_var_apps += bzla_util_time_stamp() - delta; /* let solver determine failed assumptions */ delta = bzla_util_time_stamp(); sat_aux_bzla_dual_prop(clone); assert(clone->last_sat_result == BZLA_RESULT_UNSAT); slv->time.search_init_apps_sat += bzla_util_time_stamp() - delta; /* extract partial model via failed assumptions */ collect_applies(bzla, clone, key_map, assumptions, topapps, &topapps_feq); for (i = 0; i < BZLA_COUNT_STACK(*inputs); i++) { cur = BZLA_PEEK_STACK(*inputs, i); if (bzla_hashint_table_contains(topapps, bzla_node_real_addr(cur)->id)) BZLA_PUSH_STACK(*top_applies, cur); } for (i = 0; i < BZLA_COUNT_STACK(topapps_feq); i++) BZLA_PUSH_STACK(*top_applies, BZLA_PEEK_STACK(topapps_feq, i)); BZLA_RELEASE_STACK(sorted); BZLA_RELEASE_STACK(topapps_feq); bzla_hashint_table_delete(topapps); bzla_nodemap_delete(assumptions); bzla_nodemap_delete(key_map); } static void search_initial_applies_dual_prop(Bzla *bzla, Bzla *clone, BzlaNode *clone_root, BzlaNodeMap *exp_map, BzlaNodePtrStack *top_applies) { assert(bzla); assert(bzla->slv); assert(bzla->slv->kind == BZLA_FUN_SOLVER_KIND); assert(clone); assert(clone_root); assert(exp_map); assert(top_applies); double start; uint32_t i; BzlaNode *cur; BzlaNodePtrStack stack, inputs; BzlaPtrHashTableIterator it; BzlaSATMgr *smgr; BzlaFunSolver *slv; 
BzlaIntHashTable *mark; BzlaMemMgr *mm; start = bzla_util_time_stamp(); BZLALOG(1, ""); BZLALOG(1, "*** search initial applies"); mm = bzla->mm; slv = BZLA_FUN_SOLVER(bzla); slv->stats.dp_failed_vars = 0; slv->stats.dp_assumed_vars = 0; slv->stats.dp_failed_applies = 0; slv->stats.dp_assumed_applies = 0; smgr = bzla_get_sat_mgr(bzla); if (!smgr->inc_required) return; mark = bzla_hashint_table_new(mm); BZLA_INIT_STACK(mm, stack); BZLA_INIT_STACK(mm, inputs); bzla_iter_hashptr_init(&it, bzla->unsynthesized_constraints); bzla_iter_hashptr_queue(&it, bzla->synthesized_constraints); bzla_iter_hashptr_queue(&it, bzla->assumptions); while (bzla_iter_hashptr_has_next(&it)) { cur = bzla_iter_hashptr_next(&it); BZLA_PUSH_STACK(stack, cur); while (!BZLA_EMPTY_STACK(stack)) { cur = bzla_node_real_addr(BZLA_POP_STACK(stack)); if (bzla_hashint_table_contains(mark, cur->id)) continue; bzla_hashint_table_add(mark, cur->id); if (bzla_node_is_bv_var(cur) || bzla_node_is_fun_eq(cur) || bzla_node_is_apply(cur)) { assert(bzla_node_is_synth(cur)); BZLA_PUSH_STACK(inputs, cur); continue; } for (i = 0; i < cur->arity; i++) BZLA_PUSH_STACK(stack, cur->e[i]); } } (void) bzla_node_compare_by_id_qsort_asc; set_up_dual_and_collect( bzla, clone, clone_root, exp_map, &inputs, top_applies); BZLA_RELEASE_STACK(stack); BZLA_RELEASE_STACK(inputs); bzla_hashint_table_delete(mark); slv->time.search_init_apps += bzla_util_time_stamp() - start; } static void add_lemma_to_dual_prop_clone(Bzla *bzla, Bzla *clone, BzlaNode **root, BzlaNode *lemma, BzlaNodeMap *exp_map) { assert(bzla); assert(bzla->slv); assert(bzla->slv->kind == BZLA_FUN_SOLVER_KIND); assert(clone); assert(lemma); BzlaNode *clemma, *and; /* clone and rebuild lemma with rewrite level 0 (as we want the exact * expression) */ clemma = bzla_clone_recursively_rebuild_exp(bzla, clone, lemma, exp_map, 0); assert(clemma); and = bzla_exp_bv_and(clone, *root, clemma); bzla_node_release(clone, clemma); bzla_node_release(clone, *root); *root = and; } 
/*------------------------------------------------------------------------*/

/* Collect initial applies from the bv skeleton: all non-parameterized applies
 * reachable from the solver's current constraints, plus applies of UFs
 * introduced by FP word-blasting (see below). */
static void
search_initial_applies_bv_skeleton(BzlaFunSolver *slv,
                                   BzlaNodePtrStack *applies,
                                   BzlaIntHashTable *cache)
{
  assert(slv);
  assert(applies);

  double start;
  uint32_t i;
  BzlaNode *cur;
  BzlaNodePtrStack stack;
  BzlaMemMgr *mm;
  Bzla *bzla = slv->bzla;

  start = bzla_util_time_stamp();

  BZLALOG(1, "");
  BZLALOG(1, "*** search initial applies");

  mm = bzla->mm;
  BZLA_INIT_STACK(mm, stack);

  for (size_t j = 0; j < BZLA_COUNT_STACK(slv->constraints); ++j)
  {
    cur = BZLA_PEEK_STACK(slv->constraints, j);
    BZLA_PUSH_STACK(stack, cur);

    while (!BZLA_EMPTY_STACK(stack))
    {
      cur = BZLA_POP_STACK(stack);
      cur = bzla_node_real_addr(bzla_node_get_simplified(bzla, cur));

      if (bzla_hashint_table_contains(cache, cur->id)) continue;

      bzla_hashint_table_add(cache, cur->id);

      /* do not descend below quantifiers */
      if (bzla_node_is_quantifier(cur)) continue;

      if (bzla_node_is_apply(cur) && !cur->parameterized)
      {
        // assert (bzla_node_is_synth (cur));
        BZLALOG(1, "initial apply: %s", bzla_util_node2string(cur));
        BZLA_PUSH_STACK(*applies, cur);
        continue;
      }

      for (i = 0; i < cur->arity; i++) BZLA_PUSH_STACK(stack, cur->e[i]);
    }
    /* constraints are consumed (released) by this pass */
    bzla_node_release(bzla, BZLA_PEEK_STACK(slv->constraints, j));
  }
  BZLA_RESET_STACK(slv->constraints);

  BZLA_RELEASE_STACK(stack);

  /* The UFs introduced while word-blasting min/max/to_sbv/to_ubv FP terms do
   * not occur in any formula reachable from the root constraints since they
   * only encode undefined values. However, for these UFs we still have to
   * check the consistency of the corresponding function applications.
   */
  BzlaNode *uf;
  BzlaNodePtrStack ufs;
  BZLA_INIT_STACK(bzla->mm, ufs);
  bzla_fp_word_blaster_get_introduced_ufs(bzla, &ufs);
  BzlaNodeIterator it;
  for (size_t i = 0; i < BZLA_COUNT_STACK(ufs); ++i)
  {
    uf = BZLA_PEEK_STACK(ufs, i);
    bzla_iter_parent_init(&it, uf);
    while (bzla_iter_parent_has_next(&it))
    {
      cur = bzla_iter_parent_next(&it);
      BZLALOG(1, "initial apply: %s", bzla_util_node2string(cur));
      BZLA_PUSH_STACK(*applies, cur);
    }
  }
  BZLA_RELEASE_STACK(ufs);

  BZLA_FUN_SOLVER(bzla)->time.search_init_apps +=
      bzla_util_time_stamp() - start;
}

/* Justification-based search for initial applies: descend only into children
 * that are relevant w.r.t. the current bit-level (AIG) assignment.
 * Assignment encoding: 1 = true, -1 = false, 0 = 'x' (unknown). */
static void
search_initial_applies_just(Bzla *bzla, BzlaNodePtrStack *top_applies)
{
  assert(bzla);
  assert(bzla->slv);
  assert(bzla->slv->kind == BZLA_FUN_SOLVER_KIND);
  assert(top_applies);
  assert(bzla->unsynthesized_constraints->count == 0);

  uint32_t i, h;
  int32_t a, a0, a1;
  double start;
  BzlaNode *cur, *e0, *e1;
  BzlaPtrHashTableIterator it;
  BzlaNodePtrStack stack;
  BzlaAIGMgr *amgr;
  BzlaIntHashTable *mark;
  BzlaMemMgr *mm;

  start = bzla_util_time_stamp();

  BZLALOG(1, "");
  BZLALOG(1, "*** search initial applies");

  mm   = bzla->mm;
  amgr = bzla_get_aig_mgr(bzla);
  h    = bzla_opt_get(bzla, BZLA_OPT_FUN_JUST_HEURISTIC);
  mark = bzla_hashint_table_new(mm);

  BZLA_INIT_STACK(mm, stack);

  bzla_dcr_compute_scores(bzla);

  bzla_iter_hashptr_init(&it, bzla->unsynthesized_constraints);
  bzla_iter_hashptr_queue(&it, bzla->synthesized_constraints);
  bzla_iter_hashptr_queue(&it, bzla->assumptions);
  while (bzla_iter_hashptr_has_next(&it))
  {
    cur = bzla_iter_hashptr_next(&it);
    BZLA_PUSH_STACK(stack, cur);
    while (!BZLA_EMPTY_STACK(stack))
    {
      cur = bzla_node_real_addr(BZLA_POP_STACK(stack));
      if (bzla_hashint_table_contains(mark, cur->id)) continue;
      bzla_hashint_table_add(mark, cur->id);

      if (bzla_node_is_apply(cur) && !cur->parameterized)
      {
        BZLALOG(1, "initial apply: %s", bzla_util_node2string(cur));
        BZLA_PUSH_STACK(*top_applies, cur);
        continue;
      }

      /* justification applies only to single-bit (boolean) nodes */
      if (!cur->parameterized && !bzla_node_is_fun(cur)
          && !bzla_node_is_args(cur)
          && bzla_node_bv_get_width(bzla, cur) == 1)
      {
        switch (cur->kind)
        {
          case BZLA_FUN_EQ_NODE:
            a = bzla_node_is_synth(cur)
                    ? bzla_aig_get_assignment(amgr, cur->av->aigs[0])
                    : 0;  // 'x';

            if (a == 1 || a == 0) goto PUSH_CHILDREN;
            /* if equality is false (-1), we do not need to check
             * applies below for consistency as it is sufficient to
             * check the witnesses of inequality */
            break;

          case BZLA_BV_AND_NODE:
            a = bzla_node_is_synth(cur)
                    ? bzla_aig_get_assignment(amgr, cur->av->aigs[0])
                    : 0;  // 'x'

            e0 = bzla_node_real_addr(cur->e[0]);
            e1 = bzla_node_real_addr(cur->e[1]);

            a0 = bzla_node_is_synth(e0)
                     ? bzla_aig_get_assignment(amgr, e0->av->aigs[0])
                     : 0;  // 'x'
            if (a0 && bzla_node_is_inverted(cur->e[0])) a0 *= -1;

            a1 = bzla_node_is_synth(e1)
                     ? bzla_aig_get_assignment(amgr, e1->av->aigs[0])
                     : 0;  // 'x'
            if (a1 && bzla_node_is_inverted(cur->e[1])) a1 *= -1;

            if (a != -1)  // and = 1 or x
            {
              BZLA_PUSH_STACK(stack, cur->e[0]);
              BZLA_PUSH_STACK(stack, cur->e[1]);
            }
            else  // and = 0
            {
              if (a0 == -1 && a1 == -1)  // both inputs 0
              {
                /* branch selection w.r.t selected heuristic */
                if (h == BZLA_JUST_HEUR_BRANCH_MIN_APP
                    || h == BZLA_JUST_HEUR_BRANCH_MIN_DEP)
                {
                  if (bzla_dcr_compare_scores(bzla, cur->e[0], cur->e[1]))
                    BZLA_PUSH_STACK(stack, cur->e[0]);
                  else
                    BZLA_PUSH_STACK(stack, cur->e[1]);
                }
                else
                {
                  assert(h == BZLA_JUST_HEUR_BRANCH_LEFT);
                  BZLA_PUSH_STACK(stack, cur->e[0]);
                }
              }
              else if (a0 == -1)  // only one input 0
                BZLA_PUSH_STACK(stack, cur->e[0]);
              else if (a1 == -1)  // only one input 0
                BZLA_PUSH_STACK(stack, cur->e[1]);
              else if (a0 == 0 && a1 == 1)  // first input x, second 0
                BZLA_PUSH_STACK(stack, cur->e[0]);
              else if (a0 == 1 && a1 == 0)  // first input 0, second x
                BZLA_PUSH_STACK(stack, cur->e[1]);
              else  // both inputs x
              {
                assert(a0 == 0);
                assert(a1 == 0);
                BZLA_PUSH_STACK(stack, cur->e[0]);
                BZLA_PUSH_STACK(stack, cur->e[1]);
              }
            }
            break;

#if 0
          case BZLA_BCOND_NODE:
            BZLA_PUSH_STACK (stack, cur->e[0]);
            a = bzla_node_is_synth (bzla_node_real_addr (cur->e[0]))
                    ? bzla_aig_get_assignment (
                        amgr, bzla_node_real_addr (cur->e[0])->av->aigs[0])
                    : 0;  // 'x';
            if (bzla_node_is_inverted (cur->e[0])) a *= -1;
            if (a == 1)  // then
              BZLA_PUSH_STACK (stack, cur->e[1]);
            else if (a == -1)
              BZLA_PUSH_STACK (stack, cur->e[2]);
            else  // else
            {
              BZLA_PUSH_STACK (stack, cur->e[1]);
              BZLA_PUSH_STACK (stack, cur->e[2]);
            }
            break;
#endif

          default: goto PUSH_CHILDREN;
        }
      }
      else
      {
      PUSH_CHILDREN:
        for (i = 0; i < cur->arity; i++) BZLA_PUSH_STACK(stack, cur->e[i]);
      }
    }
  }

  BZLA_RELEASE_STACK(stack);
  bzla_hashint_table_delete(mark);

  BZLA_FUN_SOLVER(bzla)->time.search_init_apps +=
      bzla_util_time_stamp() - start;
}

/* Check whether two bv expressions have equal current assignments. */
static bool
equal_bv_assignments(BzlaNode *exp0, BzlaNode *exp1)
{
  assert(!bzla_node_is_proxy(exp0));
  assert(!bzla_node_is_proxy(exp1));

  bool equal;
  Bzla *bzla;
  BzlaBitVector *bv0, *bv1;

  bzla  = bzla_node_real_addr(exp0)->bzla;
  bv0   = get_bv_assignment(bzla, exp0);
  bv1   = get_bv_assignment(bzla, exp1);
  equal = bzla_bv_compare(bv0, bv1) == 0;
  bzla_bv_free(bzla->mm, bv0);
  bzla_bv_free(bzla->mm, bv1);
  return equal;
}

/* Compare two argument tuples w.r.t. their current assignments.
 * Returns 0 iff the sorts match and all argument assignments are pairwise
 * equal, 1 otherwise. */
static int32_t
compare_args_assignments(BzlaNode *e0, BzlaNode *e1)
{
  assert(bzla_node_is_regular(e0));
  assert(bzla_node_is_regular(e1));
  assert(bzla_node_is_args(e0));
  assert(bzla_node_is_args(e1));
  assert(!bzla_node_is_proxy(e0));
  assert(!bzla_node_is_proxy(e1));

  bool equal;
  BzlaBitVector *bv0, *bv1;
  BzlaNode *arg0, *arg1;
  Bzla *bzla;
  BzlaArgsIterator it0, it1;
  bzla = e0->bzla;

  if (bzla_node_get_sort_id(e0) != bzla_node_get_sort_id(e1)) return 1;

  if (e0 == e1) return 0;

  bzla_iter_args_init(&it0, e0);
  bzla_iter_args_init(&it1, e1);

  while (bzla_iter_args_has_next(&it0))
  {
    assert(bzla_iter_args_has_next(&it1));
    arg0 = bzla_iter_args_next(&it0);
    arg1 = bzla_iter_args_next(&it1);

    bv0 = get_bv_assignment(bzla, arg0);
    bv1 = get_bv_assignment(bzla, arg1);

    equal = bzla_bv_compare(bv0, bv1) == 0;
    bzla_bv_free(bzla->mm, bv0);
    bzla_bv_free(bzla->mm, bv1);

    if (!equal) return 1;
  }

  return 0;
}

/* Hash an argument tuple w.r.t. the current assignments of its arguments
 * (consistent with compare_args_assignments). */
static uint32_t
hash_args_assignment(BzlaNode *exp)
{
  assert(exp);
assert(bzla_node_is_regular(exp));
  assert(bzla_node_is_args(exp));

  uint32_t hash;
  Bzla *bzla;
  BzlaNode *arg;
  BzlaArgsIterator it;
  BzlaBitVector *bv;

  bzla = exp->bzla;
  hash = 0;
  bzla_iter_args_init(&it, exp);
  while (bzla_iter_args_has_next(&it))
  {
    arg = bzla_iter_args_next(&it);
    bv  = get_bv_assignment(bzla, arg);
    hash += bzla_bv_hash(bv);
    bzla_bv_free(bzla->mm, bv);
  }
  return hash;
}

/* Follow the propagation path from 'from' to 'to' (under arguments 'args')
 * and collect all conditions evaluated during propagation into 'prem'
 * (deduplicated via 'cache'; pushed nodes are copied, caller releases). */
static void
collect_premisses(Bzla *bzla,
                  BzlaNode *from,
                  BzlaNode *to,
                  BzlaNode *args,
                  BzlaNodePtrStack *prem,
                  BzlaIntHashTable *cache)
{
  assert(bzla);
  assert(from);
  assert(to);
  assert(prem);
  assert(cache);
  assert(bzla_node_is_regular(from));
  assert(bzla_node_is_regular(args));
  assert(bzla_node_is_args(args));
  assert(bzla_node_is_regular(to));

  BZLALOG(1,
          "%s: %s, %s, %s",
          __FUNCTION__,
          bzla_util_node2string(from),
          bzla_util_node2string(to),
          bzla_util_node2string(args));

  BzlaMemMgr *mm;
  BzlaNode *cur, *result, *tmp;
  BzlaBitVector *bv_assignment;

  mm = bzla->mm;

  /* follow propagation path and collect all conditions that have been
   * evaluated during propagation */
  if (bzla_node_is_apply(from))
  {
    assert(bzla_node_is_regular(to));
    assert(bzla_node_is_fun(to));
    assert(!bzla_node_is_simplified(from->e[0])
           || bzla_opt_get(bzla, BZLA_OPT_PP_NONDESTR_SUBST));

    cur = bzla_node_get_simplified(bzla, from->e[0]);

    for (;;)
    {
      assert(bzla_node_is_regular(cur));
      assert(bzla_node_is_fun(cur));
      assert(!bzla_node_is_simplified(cur));

      if (cur == to) break;

      if (bzla_node_is_fun_cond(cur))
      {
        bv_assignment = get_bv_assignment(bzla, cur->e[0]);

        /* propagate over function ite */
        if (bzla_bv_is_true(bv_assignment))
        {
          tmp = cur->e[0];
          cur = cur->e[1];
        }
        else
        {
          tmp = bzla_node_invert(cur->e[0]);
          cur = cur->e[2];
        }
        if (!bzla_hashint_table_contains(cache, bzla_node_get_id(tmp)))
          BZLA_PUSH_STACK(*prem, bzla_node_copy(bzla, tmp));
        bzla_bv_free(mm, bv_assignment);
        continue;
      }
      else if (bzla_node_is_update(cur))
      {
        /* the written index must differ, otherwise propagation would have
         * stopped at this update */
        tmp = cur->e[1];
        assert(compare_args_assignments(tmp, from->e[1]) != 0);
        if (!bzla_hashint_table_contains(cache, bzla_node_get_id(tmp)))
          BZLA_PUSH_STACK(*prem, bzla_node_copy(bzla, tmp));
        cur = cur->e[0];
      }
      else
      {
        assert(bzla_node_is_lambda(cur));
        bzla_beta_assign_args(bzla, cur, args);
        result = bzla_beta_reduce_partial_collect_new(bzla, cur, prem, cache);
        bzla_beta_unassign_params(bzla, cur);
        result = bzla_node_real_addr(result);

        assert(bzla_node_is_apply(result));
        assert(result->e[1] == args);

        cur = result->e[0];
        bzla_node_release(bzla, result);
      }
    }
  }
  else
  {
    // TODO: merge with above lambda case?
    assert(bzla_node_is_lambda(from));
    cur = from;

    bzla_beta_assign_args(bzla, cur, args);
    result = bzla_beta_reduce_partial_collect_new(bzla, cur, prem, cache);
    bzla_beta_unassign_params(bzla, cur);
    assert(bzla_node_real_addr(result) == to);
    bzla_node_release(bzla, result);
  }
}

/* Build the conjunction of pairwise equalities between the elements of
 * 'args1' and 'args2'.  Returns a fresh node (caller releases). */
static BzlaNode *
mk_equal_args(Bzla *bzla, BzlaNode *args1, BzlaNode *args2)
{
  BzlaNode *arg1, *arg2, *eq, *tmp, *res = 0;
  BzlaArgsIterator it1, it2;

  bzla_iter_args_init(&it1, args1);
  bzla_iter_args_init(&it2, args2);
  while (bzla_iter_args_has_next(&it1))
  {
    assert(bzla_iter_args_has_next(&it2));
    arg1 = bzla_iter_args_next(&it1);
    arg2 = bzla_iter_args_next(&it2);

    eq = bzla_exp_eq(bzla, arg1, arg2);
    if (res)
    {
      tmp = bzla_exp_bv_and(bzla, res, eq);
      bzla_node_release(bzla, res);
      bzla_node_release(bzla, eq);
      res = tmp;
    }
    else
      res = eq;
  }
  assert(!bzla_iter_args_has_next(&it2));
  return res;
}

/* Conjoin collected premisses: args-nodes contribute the NEGATED argument
 * equality w.r.t. 'args', other nodes are used as-is.  Returns 0 when
 * 'num_prem' is 0 (caller releases otherwise). */
static BzlaNode *
mk_premise(Bzla *bzla, BzlaNode *args, BzlaNode *prem[], uint32_t num_prem)
{
  uint32_t i;
  BzlaNode *cur, *res = 0, *tmp, *p;

  for (i = 0; i < num_prem; i++)
  {
    cur = prem[i];

    if (bzla_node_is_args(cur))
      p = bzla_node_invert(mk_equal_args(bzla, args, cur));
    else
      p = bzla_node_copy(bzla, cur);

    if (res)
    {
      tmp = bzla_exp_bv_and(bzla, res, p);
      bzla_node_release(bzla, res);
      bzla_node_release(bzla, p);
      res = tmp;
    }
    else
      res = p;
  }
  return res;
}

/* Create and queue a lemma for a conflict on 'fun':
 * - app2 != 0: function congruence conflict between app1 and app2,
 * - fun is an update: read-over-write conflict for app1,
 * - otherwise (lambda): beta-reduction conflict for app1. */
static void
add_lemma(Bzla *bzla, BzlaNode *fun, BzlaNode *app1, BzlaNode *app2)
{
  assert(bzla);
  assert(bzla->slv);
  assert(bzla->slv->kind == BZLA_FUN_SOLVER_KIND);
  assert(fun);
  assert(bzla_node_is_regular(fun));
  assert(bzla_node_is_fun(fun));
  assert(!fun->parameterized);
  assert(app1);
  assert(bzla_node_is_regular(app1));
  assert(bzla_node_is_apply(app1));
  /* NOTE(review): '||' between the app2 predicates looks like it was meant
   * to be '&&' (regular AND apply) -- confirm. */
  assert(!app2 || bzla_node_is_regular(app2) || bzla_node_is_apply(app2));

  double start;
  uint32_t i, lemma_size = 1;
  BzlaIntHashTable *cache_app1, *cache_app2;
  BzlaNodePtrStack prem_app1, prem_app2, prem;
  BzlaNode *value, *tmp, *and, *con, *lemma;
  BzlaMemMgr *mm;
  BzlaFunSolver *slv;

  start = bzla_util_time_stamp();
  mm    = bzla->mm;
  slv   = BZLA_FUN_SOLVER(bzla);

  cache_app1 = bzla_hashint_table_new(mm);
  cache_app2 = bzla_hashint_table_new(mm);
  BZLA_INIT_STACK(mm, prem_app1);
  BZLA_INIT_STACK(mm, prem_app2);
  BZLA_INIT_STACK(mm, prem);

  /* collect premise and conclusion */

  collect_premisses(bzla, app1, fun, app1->e[1], &prem_app1, cache_app1);

  tmp = mk_premise(
      bzla, app1->e[1], prem_app1.start, BZLA_COUNT_STACK(prem_app1));

  BZLA_PUSH_STACK_IF(tmp != 0, prem, tmp);
  lemma_size += BZLA_COUNT_STACK(prem_app1);

  if (app2) /* function congruence axiom conflict */
  {
    collect_premisses(bzla, app2, fun, app2->e[1], &prem_app2, cache_app2);

    tmp = mk_premise(
        bzla, app2->e[1], prem_app2.start, BZLA_COUNT_STACK(prem_app2));

    BZLA_PUSH_STACK_IF(tmp != 0, prem, tmp);
    BZLA_PUSH_STACK(prem, mk_equal_args(bzla, app1->e[1], app2->e[1]));
    lemma_size += BZLA_COUNT_STACK(prem_app2);
    con = bzla_exp_eq(bzla, app1, app2);
  }
  else if (bzla_node_is_update(fun)) /* read over write conflict */
  {
    BZLA_PUSH_STACK(prem, mk_equal_args(bzla, app1->e[1], fun->e[1]));
    lemma_size += bzla_node_args_get_arity(bzla, app1->e[1]);
    con = bzla_exp_eq(bzla, app1, fun->e[2]);
  }
  else /* beta reduction conflict */
  {
    assert(bzla_node_is_lambda(fun));
    bzla_beta_assign_args(bzla, fun, app1->e[1]);
    value = bzla_beta_reduce_partial(bzla, fun, 0);
    bzla_beta_unassign_params(bzla, fun);
    assert(!bzla_node_is_lambda(value));

    /* path from conflicting fun to value */
    collect_premisses(bzla,
                      fun,
                      bzla_node_real_addr(value),
                      app1->e[1],
                      &prem_app2,
                      cache_app2);

    tmp = mk_premise(
        bzla, app1->e[1], prem_app2.start, BZLA_COUNT_STACK(prem_app2));

    BZLA_PUSH_STACK_IF(tmp != 0, prem, tmp);
    lemma_size += BZLA_COUNT_STACK(prem_app2);
    con = bzla_exp_eq(bzla, app1, value);
    bzla_node_release(bzla, value);
  }

  /* create lemma: premise -> conclusion (or just the conclusion) */
  if (BZLA_EMPTY_STACK(prem))
    lemma = con;
  else
  {
    and   = bzla_exp_bv_and_n(bzla, prem.start, BZLA_COUNT_STACK(prem));
    lemma = bzla_exp_implies(bzla, and, con);
    bzla_node_release(bzla, and);
    bzla_node_release(bzla, con);
  }

  assert(lemma != bzla->true_exp);
  /* queue only lemmas not seen before; update size histogram */
  if (!bzla_hashptr_table_get(slv->lemmas, lemma))
  {
    bzla_hashptr_table_add(slv->lemmas, bzla_node_copy(bzla, lemma));
    BZLA_PUSH_STACK(slv->cur_lemmas, lemma);
    slv->stats.lod_refinements++;
    slv->stats.lemmas_size_sum += lemma_size;
    if (lemma_size >= BZLA_SIZE_STACK(slv->stats.lemmas_size))
      BZLA_FIT_STACK(slv->stats.lemmas_size, lemma_size);
    slv->stats.lemmas_size.start[lemma_size] += 1;
  }
  bzla_node_release(bzla, lemma);

  /* cleanup */
  for (i = 0; i < BZLA_COUNT_STACK(prem); i++)
    bzla_node_release(bzla, BZLA_PEEK_STACK(prem, i));
  for (i = 0; i < BZLA_COUNT_STACK(prem_app1); i++)
    bzla_node_release(bzla, BZLA_PEEK_STACK(prem_app1, i));
  for (i = 0; i < BZLA_COUNT_STACK(prem_app2); i++)
    bzla_node_release(bzla, BZLA_PEEK_STACK(prem_app2, i));

  BZLA_RELEASE_STACK(prem_app1);
  BZLA_RELEASE_STACK(prem_app2);
  BZLA_RELEASE_STACK(prem);
  bzla_hashint_table_delete(cache_app1);
  bzla_hashint_table_delete(cache_app2);

  BZLA_FUN_SOLVER(bzla)->time.lemma_gen += bzla_util_time_stamp() - start;
}

/* Push all non-parameterized applies reachable from 'exp' (without crossing
 * function equalities or quantifiers) onto 'prop_stack', each followed by its
 * function; deduplicated via 'apply_search_cache'. */
static void
push_applies_for_propagation(Bzla *bzla,
                             BzlaNode *exp,
                             BzlaNodePtrStack *prop_stack,
                             BzlaIntHashTable *apply_search_cache)
{
  assert(bzla);
  assert(bzla->slv);
  assert(bzla->slv->kind == BZLA_FUN_SOLVER_KIND);
  assert(exp);
  assert(prop_stack);

  uint32_t i;
  double start;
  BzlaFunSolver *slv;
  BzlaNode *cur;
  BzlaNodePtrStack visit;
  BzlaMemMgr *mm;

  start = bzla_util_time_stamp();
  slv   = BZLA_FUN_SOLVER(bzla);
  mm    = bzla->mm;

  BZLA_INIT_STACK(mm, visit);
  BZLA_PUSH_STACK(visit, exp);
  do
  {
    cur =
BZLA_POP_STACK(visit);

    assert(!bzla_node_is_simplified(cur)
           || bzla_opt_get(bzla, BZLA_OPT_PP_NONDESTR_SUBST));

    cur = bzla_node_real_addr(bzla_node_get_simplified(bzla, cur));
    assert(!cur->parameterized);
    assert(!bzla_node_is_fun(cur));

    /* prune: only nodes with applies below are interesting; stop at
     * function equalities and quantifiers */
    if (!cur->apply_below
        || bzla_hashint_table_contains(apply_search_cache, cur->id)
        || bzla_node_is_fun_eq(cur) || bzla_node_is_quantifier(cur))
      continue;

    bzla_hashint_table_add(apply_search_cache, cur->id);

    if (bzla_node_is_apply(cur))
    {
      /* push the (apply, fun) pair consumed by propagate() */
      BZLA_PUSH_STACK(*prop_stack, cur);
      BZLA_PUSH_STACK(*prop_stack, cur->e[0]);
      BZLALOG(2, "push apply: %s", bzla_util_node2string(cur));
      continue;
    }

    for (i = 0; i < cur->arity; i++) BZLA_PUSH_STACK(visit, cur->e[i]);
  } while (!BZLA_EMPTY_STACK(visit));
  BZLA_RELEASE_STACK(visit);
  slv->time.find_prop_app += bzla_util_time_stamp() - start;
}

/* Check whether any apply reachable from the arguments of 'app' is already
 * registered as a conflicting apply in 'conf_apps'. */
static bool
find_conflict_app(Bzla *bzla, BzlaNode *app, BzlaIntHashTable *conf_apps)
{
  double start;
  bool res = false;
  uint32_t i;
  BzlaIntHashTable *cache;
  BzlaMemMgr *mm;
  BzlaNodePtrStack visit;
  BzlaNode *cur;

  start = bzla_util_time_stamp();
  mm    = bzla->mm;
  cache = bzla_hashint_table_new(mm);
  BZLA_INIT_STACK(mm, visit);
  BZLA_PUSH_STACK(visit, app->e[1]);
  while (!BZLA_EMPTY_STACK(visit))
  {
    cur = bzla_node_real_addr(BZLA_POP_STACK(visit));
    if (!cur->apply_below || bzla_node_is_fun(cur)
        || bzla_hashint_table_contains(cache, cur->id))
      continue;
    bzla_hashint_table_add(cache, cur->id);
    if (bzla_hashint_table_contains(conf_apps, cur->id))
    {
      res = true;
      break;
    }
    if (bzla_node_is_apply(cur)) continue;

    for (i = 0; i < cur->arity; i++) BZLA_PUSH_STACK(visit, cur->e[i]);
  }
  bzla_hashint_table_delete(cache);
  BZLA_RELEASE_STACK(visit);
  BZLA_FUN_SOLVER(bzla)->time.find_conf_app += bzla_util_time_stamp() - start;
  return res;
}

/* Main propagation loop of the fun solver: pop (apply, fun) pairs from
 * 'prop_stack', propagate applies down the function structure and generate
 * lemmas for function congruence, read-over-write and beta-reduction
 * conflicts.  Depending on BZLA_OPT_FUN_EAGER_LEMMAS, propagation either
 * stops at the first conflict or continues to collect more lemmas. */
static void
propagate(Bzla *bzla,
          BzlaNodePtrStack *prop_stack,
          BzlaPtrHashTable *cleanup_table,
          BzlaIntHashTable *apply_search_cache)
{
  assert(bzla);
  assert(bzla->slv);
  assert(bzla->slv->kind == BZLA_FUN_SOLVER_KIND);
  assert(prop_stack);
  assert(cleanup_table);
  assert(apply_search_cache);

  double start;
  uint32_t opt_eager_lemmas;
  bool prop_down, conflict, restart;
  BzlaBitVector *bv;
  BzlaMemMgr *mm;
  BzlaFunSolver *slv;
  BzlaNode *fun, *app, *args, *fun_value, *cur;
  BzlaNode *hashed_app;
  BzlaPtrHashBucket *b;
  BzlaPtrHashTableIterator it;
  BzlaPtrHashTable *conds;
  BzlaIntHashTable *conf_apps;

  start            = bzla_util_time_stamp();
  mm               = bzla->mm;
  slv              = BZLA_FUN_SOLVER(bzla);
  conf_apps        = bzla_hashint_table_new(mm);
  opt_eager_lemmas = bzla_opt_get(bzla, BZLA_OPT_FUN_EAGER_LEMMAS);

  BZLALOG(1, "");
  BZLALOG(1, "*** %s", __FUNCTION__);
  while (!BZLA_EMPTY_STACK(*prop_stack))
  {
    /* the stack holds (apply, fun) pairs, fun on top */
    fun = bzla_node_get_simplified(bzla, BZLA_POP_STACK(*prop_stack));
    assert(bzla_node_is_regular(fun));
    assert(bzla_node_is_fun(fun));
    assert(!bzla_node_is_simplified(fun));
    assert(!BZLA_EMPTY_STACK(*prop_stack));
    app = BZLA_POP_STACK(*prop_stack);
    assert(bzla_node_is_regular(app));
    assert(bzla_node_is_apply(app));

    conflict = false;
    restart  = true;

    if (app->propagated) continue;

    app->propagated = 1;
    if (!bzla_hashptr_table_get(cleanup_table, app))
      bzla_hashptr_table_add(cleanup_table, app);
    slv->stats.propagations++;

    BZLALOG(1, "propagate");
    BZLALOG(1, " app: %s", bzla_util_node2string(app));
    BZLALOG(1, " fun: %s", bzla_util_node2string(fun));

    args = app->e[1];
    assert(bzla_node_is_regular(args));
    assert(bzla_node_is_args(args));
    assert(!bzla_node_is_simplified(args)
           || bzla_opt_get(bzla, BZLA_OPT_PP_NONDESTR_SUBST));
    args = bzla_node_get_simplified(bzla, args);
    assert(bzla_node_is_args(args));

    assert(bzla_sort_fun_get_domain(bzla, bzla_node_get_sort_id(fun))
           == bzla_node_get_sort_id(args));

    push_applies_for_propagation(bzla, args, prop_stack, apply_search_cache);

    if (!fun->rho)
    {
      /* rho maps argument assignments to the apply that produced them */
      fun->rho =
          bzla_hashptr_table_new(mm,
                                 (BzlaHashPtr) hash_args_assignment,
                                 (BzlaCmpPtr) compare_args_assignments);
      if (!bzla_hashptr_table_get(cleanup_table, fun))
        bzla_hashptr_table_add(cleanup_table, fun);
    }
    else
    {
      b = bzla_hashptr_table_get(fun->rho, args);
      if (b)
      {
        hashed_app = (BzlaNode *) b->data.as_ptr;
        assert(bzla_node_is_regular(hashed_app));
        assert(bzla_node_is_apply(hashed_app));

        /* function congruence conflict */
        if (!equal_bv_assignments(hashed_app, app))
        {
          BZLALOG(1, "\e[1;31m");
          BZLALOG(1, "FC conflict at: %s", bzla_util_node2string(fun));
          BZLALOG(1, "add_lemma:");
          BZLALOG(1, " fun: %s", bzla_util_node2string(fun));
          BZLALOG(1, " app1: %s", bzla_util_node2string(hashed_app));
          BZLALOG(1, " app2: %s", bzla_util_node2string(app));
          BZLALOG(1, "\e[0;39m");
          if (opt_eager_lemmas == BZLA_FUN_EAGER_LEMMAS_CONF)
          {
            bzla_hashint_table_add(conf_apps, app->id);
            restart = find_conflict_app(bzla, app, conf_apps);
          }
          else if (opt_eager_lemmas == BZLA_FUN_EAGER_LEMMAS_ALL)
            restart = false;
          slv->stats.function_congruence_conflicts++;
          add_lemma(bzla, fun, hashed_app, app);
          // conflict = true;
          /* stop at first conflict */
          if (restart) break;
        }
        continue;
      }
    }
    assert(fun->rho);
    assert(!bzla_hashptr_table_get(fun->rho, args));
    bzla_hashptr_table_add(fun->rho, args)->data.as_ptr = app;
    BZLALOG(1,
            " save app: %s (%s)",
            bzla_util_node2string(args),
            bzla_util_node2string(app));

    /* skip array vars/uf */
    if (bzla_node_is_uf(fun)) continue;

    if (bzla_node_is_fun_cond(fun))
    {
      push_applies_for_propagation(
          bzla, fun->e[0], prop_stack, apply_search_cache);
      bv = get_bv_assignment(bzla, fun->e[0]);

      /* propagate over function ite */
      BZLALOG(1, " propagate down: %s", bzla_util_node2string(app));
      app->propagated = 0;
      BZLA_PUSH_STACK(*prop_stack, app);
      if (bzla_bv_is_true(bv))
        BZLA_PUSH_STACK(*prop_stack, fun->e[1]);
      else
        BZLA_PUSH_STACK(*prop_stack, fun->e[2]);
      bzla_bv_free(mm, bv);
      continue;
    }
    else if (bzla_node_is_update(fun))
    {
      if (compare_args_assignments(fun->e[1], args) == 0)
      {
        /* read over write: indices match, values must match too */
        if (!equal_bv_assignments(app, fun->e[2]))
        {
          BZLALOG(1, "\e[1;31m");
          BZLALOG(1, "update conflict at: %s", bzla_util_node2string(fun));
          BZLALOG(1, "add_lemma:");
          BZLALOG(1, " fun: %s", bzla_util_node2string(fun));
          BZLALOG(1, " app: %s", bzla_util_node2string(app));
          BZLALOG(1, "\e[0;39m");
#if 0
          if (opt_eager_lemmas == BZLA_FUN_EAGER_LEMMAS_CONF)
          {
            bzla_hashint_table_add (conf_apps, app->id);
            restart = find_conflict_app (bzla, app, conf_apps);
          }
          else if (opt_eager_lemmas == BZLA_FUN_EAGER_LEMMAS_ALL)
            restart = false;
#endif
          slv->stats.beta_reduction_conflicts++;
          add_lemma(bzla, fun, app, 0);
          conflict = true;

#if 0
          /* stop at first conflict */
          if (restart) break;
#endif
        }
      }
      else
      {
        /* indices differ: propagate below the update */
        app->propagated = 0;
        BZLA_PUSH_STACK(*prop_stack, app);
        BZLA_PUSH_STACK(*prop_stack, fun->e[0]);
        slv->stats.propagations_down++;
      }
      push_applies_for_propagation(
          bzla, fun->e[1], prop_stack, apply_search_cache);
      push_applies_for_propagation(
          bzla, fun->e[2], prop_stack, apply_search_cache);
      continue;
    }

    assert(bzla_node_is_lambda(fun));
    conds = bzla_hashptr_table_new(mm,
                                   (BzlaHashPtr) bzla_node_hash_by_id,
                                   (BzlaCmpPtr) bzla_node_compare_by_id);
    bzla_beta_assign_args(bzla, fun, args);
    fun_value = bzla_beta_reduce_partial(bzla, fun, conds);
    assert(!bzla_node_is_fun(fun_value));
    bzla_beta_unassign_params(bzla, fun);

    prop_down = false;
    if (!bzla_node_is_inverted(fun_value) && bzla_node_is_apply(fun_value))
      prop_down = fun_value->e[1] == args;

    if (prop_down)
    {
      assert(bzla_node_is_apply(fun_value));
      BZLA_PUSH_STACK(*prop_stack, app);
      BZLA_PUSH_STACK(*prop_stack, bzla_node_real_addr(fun_value)->e[0]);
      if (!bzla_hashptr_table_get(cleanup_table, BZLA_TOP_STACK(*prop_stack)))
      {
        bzla_hashptr_table_add(
            cleanup_table, bzla_node_copy(bzla, BZLA_TOP_STACK(*prop_stack)))
            ->data.flag = true;
      }
      slv->stats.propagations_down++;
      app->propagated = 0;
      BZLALOG(1, " propagate down: %s", bzla_util_node2string(app));
    }
    else if (!equal_bv_assignments(app, fun_value))
    {
      BZLALOG(1, "\e[1;31m");
      BZLALOG(1, "BR conflict at: %s", bzla_util_node2string(fun));
      BZLALOG(1, "add_lemma:");
      BZLALOG(1, " fun: %s", bzla_util_node2string(fun));
      BZLALOG(1, " app: %s", bzla_util_node2string(app));
      BZLALOG(1, "\e[0;39m");
      if (opt_eager_lemmas == BZLA_FUN_EAGER_LEMMAS_CONF)
      {
        bzla_hashint_table_add(conf_apps, app->id);
        restart = find_conflict_app(bzla, app, conf_apps);
      }
      else if (opt_eager_lemmas == BZLA_FUN_EAGER_LEMMAS_ALL)
        restart = false;
      slv->stats.beta_reduction_conflicts++;
      add_lemma(bzla, fun, app, 0);
      conflict = true;
    }

    /* we have a conflict and the values are inconsistent, we do not have
     * to push applies onto 'prop_stack' that produce this inconsistent
     * value */
    if (conflict)
    {
      bzla_iter_hashptr_init(&it, conds);
      while (bzla_iter_hashptr_has_next(&it))
        bzla_node_release(bzla, bzla_iter_hashptr_next(&it));
    }
    /* push applies onto 'prop_stack' that are necessary to derive
     * 'fun_value' */
    else
    {
      /* in case of down propagation 'fun_value' is a function application
       * and we can propagate 'app' instead. hence, we do not have to
       * push 'fun_value' onto 'prop_stack'. */
      if (!prop_down)
        push_applies_for_propagation(
            bzla, fun_value, prop_stack, apply_search_cache);

      /* push applies in evaluated conditions */
      bzla_iter_hashptr_init(&it, conds);
      while (bzla_iter_hashptr_has_next(&it))
      {
        cur = bzla_iter_hashptr_next(&it);
        push_applies_for_propagation(
            bzla, cur, prop_stack, apply_search_cache);
        bzla_node_release(bzla, cur);
      }
    }
    bzla_hashptr_table_delete(conds);
    bzla_node_release(bzla, fun_value);

    /* stop at first conflict */
    if (restart && conflict) break;
  }
  bzla_hashint_table_delete(conf_apps);
  slv->time.prop += bzla_util_time_stamp() - start;
}

/* generate hash table for function 'fun' consisting of all rho and static_rho
 * hash tables.
*/
/* Build a lookup table (args -> value) for function 'fun' by walking its
 * definition and merging all 'rho' and 'static_rho' tables encountered.
 * On return, '*base_array' points to the innermost function node reached
 * (the "base" of the lambda/ite/update chain).  Caller owns the returned
 * hash table and must delete it. */
static BzlaPtrHashTable *
generate_table(Bzla *bzla, BzlaNode *fun, BzlaNode **base_array)
{
  uint32_t i;
  BzlaMemMgr *mm;
  BzlaNode *cur, *value, *args, *cur_fun;
  BzlaPtrHashTable *table, *rho, *static_rho;
  BzlaNodePtrStack visit;
  BzlaIntHashTable *cache;
  BzlaPtrHashTableIterator it;
  BzlaBitVector *evalbv;

  mm    = bzla->mm;
  /* keys are argument tuples hashed/compared by their current assignment */
  table = bzla_hashptr_table_new(mm,
                                 (BzlaHashPtr) hash_args_assignment,
                                 (BzlaCmpPtr) compare_args_assignments);
  cache = bzla_hashint_table_new(mm);

  BZLA_INIT_STACK(mm, visit);
  BZLA_PUSH_STACK(visit, fun);

  cur_fun = 0;
  while (!BZLA_EMPTY_STACK(visit))
  {
    cur = bzla_node_real_addr(BZLA_POP_STACK(visit));

    /* skip already-seen nodes and non-parameterized non-function terms */
    if (bzla_hashint_table_contains(cache, cur->id)
        || (!bzla_node_is_fun(cur) && !cur->parameterized))
      continue;

    bzla_hashint_table_add(cache, cur->id);

    /* NOTE: all encountered lambda nodes need to be arrays,
     * in any other case we fully support equality over UFs and
     * conditionals. */
    if (bzla_node_is_fun(cur))
    {
      rho        = cur->rho;
      static_rho = 0;
      cur_fun    = cur; /* remember deepest function seen so far */

      if (bzla_node_is_lambda(cur) && bzla_node_is_array(cur))
      {
        assert(cur->is_array);
        static_rho = bzla_node_lambda_get_static_rho(cur);
        assert(!bzla_node_real_addr(cur->e[1])->parameterized || static_rho);
      }
      else if (bzla_node_is_fun_cond(cur))
      {
        /* follow only the branch selected by the current model value
         * of the condition */
        evalbv = get_bv_assignment(bzla, cur->e[0]);
        if (bzla_bv_is_true(evalbv))
          BZLA_PUSH_STACK(visit, cur->e[1]);
        else
          BZLA_PUSH_STACK(visit, cur->e[2]);
        bzla_bv_free(mm, evalbv);
      }
      else if (bzla_node_is_update(cur))
      {
        /* record the written (index, value) pair; earlier (outer) updates
         * shadow later ones, hence only add if index not yet present */
        if (!bzla_hashptr_table_get(table, cur->e[1]))
          bzla_hashptr_table_add(table, cur->e[1])->data.as_ptr = cur->e[2];
        BZLA_PUSH_STACK(visit, cur->e[0]);
      }

      /* iterate rho, then static_rho (rho entries take precedence) */
      if (rho)
      {
        bzla_iter_hashptr_init(&it, rho);
        if (static_rho) bzla_iter_hashptr_queue(&it, static_rho);
      }
      else if (static_rho)
        bzla_iter_hashptr_init(&it, static_rho);

      if (rho || static_rho)
      {
        while (bzla_iter_hashptr_has_next(&it))
        {
          value = it.bucket->data.as_ptr;
          assert(!bzla_node_is_proxy(value));
          args = bzla_iter_hashptr_next(&it);
          assert(!bzla_node_is_proxy(args));
          if (!bzla_hashptr_table_get(table, args))
            bzla_hashptr_table_add(table, args)->data.as_ptr = value;
        }
      }

      /* child already pushed w.r.t. evaluation of condition */
      if (bzla_node_is_fun_cond(cur) || bzla_node_is_update(cur)
          /* do not traverse further down if it's a non-array lambda. */
          || (bzla_node_is_lambda(cur) && !bzla_node_is_array(cur)))
        continue;
    }

    for (i = 0; i < cur->arity; i++) BZLA_PUSH_STACK(visit, cur->e[i]);
  }

  assert(cur_fun);
  *base_array = cur_fun;

  BZLA_RELEASE_STACK(visit);
  bzla_hashint_table_delete(cache);

  return table;
}

/* Check consistency of all currently-true function equalities by comparing
 * the (args -> value) tables of both sides; for each disagreeing argument
 * tuple, add an extensionality lemma  feq => f(args) = g(args).
 * Also aborts if a positive equality over two distinct constant arrays is
 * detected (currently unsupported). */
static void
add_extensionality_lemmas(Bzla *bzla)
{
  assert(bzla);
  assert(bzla->slv);
  assert(bzla->slv->kind == BZLA_FUN_SOLVER_KIND);

  double start, delta;
  bool skip;
  BzlaBitVector *evalbv;
  uint32_t num_lemmas = 0;
  BzlaNode *cur, *cur_args, *app0, *app1, *eq, *con, *value;
  BzlaNode *base0, *base1;
  BzlaPtrHashTableIterator it;
  BzlaPtrHashTable *table0, *table1, *conflicts;
  BzlaPtrHashTableIterator hit;
  BzlaNodePtrStack feqs, const_arrays;
  BzlaMemMgr *mm;
  BzlaPtrHashBucket *b;
  BzlaFunSolver *slv;

  start = bzla_util_time_stamp();

  BZLALOG(1, "");
  BZLALOG(1, "*** %s", __FUNCTION__);

  slv = BZLA_FUN_SOLVER(bzla);
  mm  = bzla->mm;
  BZLA_INIT_STACK(mm, feqs);
  BZLA_INIT_STACK(mm, const_arrays);

  /* collect all reachable function equalities */
  bzla_iter_hashptr_init(&it, bzla->feqs);
  while (bzla_iter_hashptr_has_next(&it))
  {
    BZLA_PUSH_STACK(feqs, bzla_iter_hashptr_next(&it));
  }

  /* union-find over function bases, used below to detect chains of
   * equalities between constant arrays */
  BzlaUnionFind *ufind = bzla_ufind_new(bzla->mm);

  while (!BZLA_EMPTY_STACK(feqs))
  {
    cur = bzla_node_get_simplified(bzla, BZLA_POP_STACK(feqs));
    if (!bzla_node_is_fun_eq(cur)) continue;

    evalbv = get_bv_assignment(bzla, cur);
    assert(evalbv);
    skip = bzla_bv_is_false(evalbv);
    bzla_bv_free(bzla->mm, evalbv);

    /* only equalities assigned true can be violated extensionally */
    if (skip) continue;

    base0  = base1 = 0;
    table0 = generate_table(bzla, cur->e[0], &base0);
    table1 = generate_table(bzla, cur->e[1], &base1);
    assert(base0);
    assert(base1);
    bzla_ufind_merge(ufind, base0, base1);
    BZLA_PUSH_STACK_IF(bzla_node_is_const_array(base0), const_arrays, base0);
    BZLA_PUSH_STACK_IF(bzla_node_is_const_array(base1), const_arrays, base1);

    conflicts = bzla_hashptr_table_new(mm,
                                       (BzlaHashPtr) hash_args_assignment,
                                       (BzlaCmpPtr) compare_args_assignments);

    /* compare table0 against table1: argument tuples missing on the other
     * side or mapped to a different value are conflicts */
    bzla_iter_hashptr_init(&hit, table0);
    while (bzla_iter_hashptr_has_next(&hit))
    {
      value    = hit.bucket->data.as_ptr;
      cur_args = bzla_iter_hashptr_next(&hit);
      b        = bzla_hashptr_table_get(table1, cur_args);

      if (bzla_hashptr_table_get(conflicts, cur_args)) continue;

      if (!b || !equal_bv_assignments(value, b->data.as_ptr))
        bzla_hashptr_table_add(conflicts, cur_args);
    }

    /* ... and symmetrically table1 against table0 */
    bzla_iter_hashptr_init(&hit, table1);
    while (bzla_iter_hashptr_has_next(&hit))
    {
      value    = hit.bucket->data.as_ptr;
      cur_args = bzla_iter_hashptr_next(&hit);
      b        = bzla_hashptr_table_get(table0, cur_args);

      if (bzla_hashptr_table_get(conflicts, cur_args)) continue;

      if (!b || !equal_bv_assignments(value, b->data.as_ptr))
        bzla_hashptr_table_add(conflicts, cur_args);
    }

    BZLALOG(1, " %s", bzla_util_node2string(cur));
    bzla_iter_hashptr_init(&hit, conflicts);
    while (bzla_iter_hashptr_has_next(&hit))
    {
      cur_args = bzla_iter_hashptr_next(&hit);
      app0     = bzla_exp_apply(bzla, cur->e[0], cur_args);
      app1     = bzla_exp_apply(bzla, cur->e[1], cur_args);
      eq       = bzla_exp_eq(bzla, app0, app1);
      con      = bzla_exp_implies(bzla, cur, eq);

      /* add instantiation of extensionality lemma */
      if (!bzla_hashptr_table_get(slv->lemmas, con))
      {
        /* slv->lemmas takes ownership of the copy; cur_lemmas holds the
         * original (released below via the four bzla_node_release calls) */
        bzla_hashptr_table_add(slv->lemmas, bzla_node_copy(bzla, con));
        BZLA_PUSH_STACK(slv->cur_lemmas, con);
        slv->stats.extensionality_lemmas++;
        slv->stats.lod_refinements++;
        num_lemmas++;
        BZLALOG(1,
                " %s, %s",
                bzla_util_node2string(app0),
                bzla_util_node2string(app1));
      }
      bzla_node_release(bzla, app0);
      bzla_node_release(bzla, app1);
      bzla_node_release(bzla, eq);
      bzla_node_release(bzla, con);
    }
    bzla_hashptr_table_delete(conflicts);
    bzla_hashptr_table_delete(table0);
    bzla_hashptr_table_delete(table1);
  }
  BZLA_RELEASE_STACK(feqs);

  /* No conflicts found. Check if we have positive (chains of) equalities over
   * constant arrays. */
  if (num_lemmas == 0)
  {
    int32_t id;
    BzlaIntHashTable *cache = bzla_hashint_map_new(bzla->mm);
    BzlaNode *ca;
    BzlaHashTableData *d;
    BzlaBitVector *bv0, *bv1;
    for (size_t i = 0; i < BZLA_COUNT_STACK(const_arrays); i++)
    {
      ca = BZLA_PEEK_STACK(const_arrays, i);
      id = bzla_node_get_id(bzla_ufind_get_repr(ufind, ca));
      assert(id > 0);

      if ((d = bzla_hashint_map_get(cache, id)))
      {
        /* two constant arrays in the same equivalence class: their default
         * values must agree, otherwise we cannot handle the problem */
        bv0 = get_bv_assignment(bzla, ca->e[1]);
        bv1 = get_bv_assignment(bzla, ((BzlaNode *) d->as_ptr)->e[1]);
        BZLALOG(1,
                "found equality over constant array: %s and %s\n",
                bzla_util_node2string(d->as_ptr),
                bzla_util_node2string(ca));
        BZLA_ABORT(bzla_bv_compare(bv0, bv1),
                   "Found positive equality over two constant arrays, "
                   "which is currently not supported.");
        bzla_bv_free(mm, bv0);
        bzla_bv_free(mm, bv1);
      }
      else
      {
        bzla_hashint_map_add(cache, id)->as_ptr = ca;
      }
    }
    bzla_hashint_map_delete(cache);
  }

  BZLA_RELEASE_STACK(const_arrays);

  bzla_ufind_delete(ufind);

  delta = bzla_util_time_stamp() - start;
  BZLALOG(
      1, " added %u extensionality lemma in %.2f seconds", num_lemmas, delta);
  slv->time.check_extensionality += delta;
}

/* Find and collect all unreachable apply nodes. */
static void
push_unreachable_applies(Bzla *bzla, BzlaNodePtrStack *init_apps)
{
  uint32_t i;
  BzlaNode *cur;
  BzlaIntHashTable *cache;
  BzlaPtrHashTableIterator it;
  BzlaNodePtrStack visit;

  cache = bzla_hashint_table_new(bzla->mm);
  BZLA_INIT_STACK(bzla->mm, visit);

  /* Cache reachable nodes. */
  bzla_iter_hashptr_init(&it, bzla->synthesized_constraints);
  bzla_iter_hashptr_queue(&it, bzla->assumptions);
  while (bzla_iter_hashptr_has_next(&it))
  {
    cur = bzla_iter_hashptr_next(&it);
    BZLA_PUSH_STACK(visit, cur);
    while (!BZLA_EMPTY_STACK(visit))
    {
      cur = bzla_node_real_addr(
          bzla_node_get_simplified(bzla, BZLA_POP_STACK(visit)));

      if (bzla_hashint_table_contains(cache, cur->id)) continue;

      bzla_hashint_table_add(cache, cur->id);
      for (i = 0; i < cur->arity; i++) BZLA_PUSH_STACK(visit, cur->e[i]);
    }
  }
  BZLA_RELEASE_STACK(visit);

  /* Collect unreachable applies: scan the full node id table (index 0 is
   * unused) and keep non-parameterized applies not marked reachable. */
  for (size_t i = 1; i < BZLA_COUNT_STACK(bzla->nodes_id_table); i++)
  {
    cur = BZLA_PEEK_STACK(bzla->nodes_id_table, i);
    if (!cur) continue;
    cur = bzla_node_get_simplified(bzla, cur);
    if (cur->parameterized || !bzla_node_is_apply(cur)
        || bzla_hashint_table_contains(cache, cur->id))
      continue;
    BZLALOG(1, "unreachable apply: %s", bzla_util_node2string(cur));
    BZLA_PUSH_STACK(*init_apps, cur);
  }

  bzla_hashint_table_delete(cache);
}

/* Check the current SAT assignment for function-layer consistency: collect
 * the initial applies (via dual-prop clone, justification, or the plain bv
 * skeleton), propagate them, and generate refinement lemmas on conflicts.
 * On success (no conflicts) the bv model entries for propagated applies are
 * validated; stale entries for unchecked applies are removed. */
static void
check_and_resolve_conflicts(Bzla *bzla,
                            Bzla *clone,
                            BzlaNode *clone_root,
                            BzlaNodeMap *exp_map,
                            BzlaNodePtrStack *init_apps,
                            BzlaIntHashTable *init_apps_cache)
{
  assert(bzla);
  assert(bzla->slv);
  assert(bzla->slv->kind == BZLA_FUN_SOLVER_KIND);

  double start, start_cleanup;
  bool found_conflicts;
  int32_t i;
  BzlaMemMgr *mm;
  BzlaFunSolver *slv;
  BzlaNode *app, *cur;
  BzlaNodePtrStack prop_stack;
  BzlaNodePtrStack top_applies;
  BzlaPtrHashTable *cleanup_table;
  BzlaIntHashTable *apply_search_cache;
  BzlaPtrHashTableIterator pit;
  BzlaIntHashTableIterator iit;

  start           = bzla_util_time_stamp();
  found_conflicts = false;
  mm              = bzla->mm;
  slv             = BZLA_FUN_SOLVER(bzla);
  cleanup_table   = bzla_hashptr_table_new(mm,
                                         (BzlaHashPtr) bzla_node_hash_by_id,
                                         (BzlaCmpPtr) bzla_node_compare_by_id);

  BZLA_INIT_STACK(mm, prop_stack);
  BZLA_INIT_STACK(mm, top_applies);
  apply_search_cache = bzla_hashint_table_new(mm);

  /* NOTE: if terms containing applies do not occur in the formula anymore due
   * to variable substitution, we still need to ensure that the assignment
   * computed for the substituted variable is correct. hence, we need to check
   * the applies for consistency and push them onto the propagation stack.
   * this also applies for don't care reasoning. */
  bzla_iter_hashptr_init(&pit, bzla->inputs);
  while (bzla_iter_hashptr_has_next(&pit))
  {
    cur = bzla_simplify_exp(bzla, bzla_iter_hashptr_next(&pit));
    /* no parents -> is not reachable from the roots */
    if (bzla_node_real_addr(cur)->parents > 0 || bzla_node_is_fun(cur))
      continue;
    push_applies_for_propagation(bzla, cur, &prop_stack, apply_search_cache);
  }

  if (clone)
  {
    /* dual-prop mode: initial applies come from the clone */
    search_initial_applies_dual_prop(
        bzla, clone, clone_root, exp_map, &top_applies);
    init_apps = &top_applies;
  }
  else if (bzla_opt_get(bzla, BZLA_OPT_FUN_JUST))
  {
    search_initial_applies_just(bzla, &top_applies);
    init_apps = &top_applies;
  }
  else
    search_initial_applies_bv_skeleton(slv, init_apps, init_apps_cache);

  /* For non-extensional problems, our model generation is able to compute
   * values for applies that are not reachable from assertions. However, for
   * extensional problems this is not sufficient (extensionality axiom not
   * checked). We therefore queue all unreachable applies to make sure that we
   * compute the correct model values. */
  if (bzla_opt_get(bzla, BZLA_OPT_PRODUCE_MODELS) == 2
      && bzla->feqs->count > 0)
  {
    push_unreachable_applies(bzla, init_apps);
  }

  /* seed the propagation stack: each entry is the pair (apply, function) */
  for (i = BZLA_COUNT_STACK(*init_apps) - 1; i >= 0; i--)
  {
    app = BZLA_PEEK_STACK(*init_apps, i);
    assert(bzla_node_is_regular(app));
    assert(bzla_node_is_apply(app));
    assert(!app->parameterized);
    assert(!app->propagated);
    BZLA_PUSH_STACK(prop_stack, app);
    BZLA_PUSH_STACK(prop_stack, app->e[0]);
    BZLALOG(2, "push apply: %s", bzla_util_node2string(app));
  }

  propagate(bzla, &prop_stack, cleanup_table, apply_search_cache);
  found_conflicts = BZLA_COUNT_STACK(slv->cur_lemmas) > 0;

  /* check consistency of array/uf equalities */
  if (!found_conflicts && bzla->feqs->count > 0)
  {
    assert(BZLA_EMPTY_STACK(prop_stack));
    add_extensionality_lemmas(bzla);
    found_conflicts = BZLA_COUNT_STACK(slv->cur_lemmas) > 0;
  }

  /* applies may have assignments that were not checked for consistency, which
   * is the case when they are not required for deriving SAT (don't care
   * reasoning). hence, we remove those applies from the 'bv_model' as they do
   * not have a valid assignment. an assignment will be generated during
   * model construction */
  if (!found_conflicts)
  {
    bzla_iter_hashint_init(&iit, bzla->bv_model);
    while (bzla_iter_hashint_has_next(&iit))
    {
      cur = bzla_node_get_by_id(bzla, bzla_iter_hashint_next(&iit));
      if (bzla_node_is_apply(cur) && !bzla_node_real_addr(cur)->propagated)
        bzla_model_remove_from_bv(bzla, bzla->bv_model, cur);
    }
  }

  start_cleanup = bzla_util_time_stamp();
  bzla_iter_hashptr_init(&pit, cleanup_table);
  while (bzla_iter_hashptr_has_next(&pit))
  {
    BzlaPtrHashBucket *b = pit.bucket;
    cur                  = bzla_iter_hashptr_next(&pit);
    assert(bzla_node_is_regular(cur));
    if (bzla_node_is_apply(cur))
    {
      /* generate model for apply */
      if (!found_conflicts)
        bzla_bv_free(bzla->mm, get_bv_assignment(bzla, cur));
      cur->propagated = 0;
    }
    else
    {
      assert(bzla_node_is_fun(cur));
      assert(cur->rho);

      if (found_conflicts)
      {
        /* rho is only valid for the refuted assignment; discard it */
        bzla_hashptr_table_delete(cur->rho);
        cur->rho = 0;
      }
      else
      {
        /* remember functions for incremental usage (and prevent
         * premature release in case that function is released via API
         * call) */
        BZLA_PUSH_STACK(bzla->functions_with_model,
                        bzla_node_copy(bzla, cur));
      }

      /* If flag is set we have to decrease the reference count (function
       * created while beta reducing). */
      if (b->data.flag)
      {
        bzla_node_release(bzla, cur);
        continue;
      }
    }
  }
  slv->time.prop_cleanup += bzla_util_time_stamp() - start_cleanup;

  bzla_hashptr_table_delete(cleanup_table);
  BZLA_RELEASE_STACK(prop_stack);
  BZLA_RELEASE_STACK(top_applies);
  bzla_hashint_table_delete(apply_search_cache);
  slv->time.check_consistency += bzla_util_time_stamp() - start;
}

/* Release all cached lemmas and reinstall an empty lemma cache.
 * Used when lemmas are assumed (not asserted) and must not persist
 * across check-sat calls. */
static void
reset_lemma_cache(BzlaFunSolver *slv)
{
  Bzla *bzla;
  BzlaPtrHashTableIterator it;
  bzla = slv->bzla;
  bzla_iter_hashptr_init(&it, slv->lemmas);
  while (bzla_iter_hashptr_has_next(&it))
    bzla_node_release(bzla, bzla_iter_hashptr_next(&it));
  bzla_hashptr_table_delete(slv->lemmas);

  slv->lemmas = bzla_hashptr_table_new(bzla->mm,
                                       (BzlaHashPtr) bzla_node_hash_by_id,
                                       (BzlaCmpPtr) bzla_node_compare_by_id);
}

/* Mark the cone of influence of leaf 'node' (all transitive parents) in
 * 'cone'.  Any satisfied root (roots[id].flag == true) found inside the
 * cone is pushed onto 'false_roots' so it is re-treated as unsatisfied. */
static void
mark_cone(Bzla *bzla,
          BzlaNode *node,
          BzlaIntHashTable *cone,
          BzlaIntHashTable *roots,
          BzlaNodePtrStack *false_roots)
{
  assert(bzla_lsutils_is_leaf_node(node));

  BzlaNode *cur;
  BzlaNodeIterator it;
  BzlaNodePtrStack visit;
  BzlaHashTableData *d;

  BZLA_INIT_STACK(bzla->mm, visit);
  BZLA_PUSH_STACK(visit, node);
  while (!BZLA_EMPTY_STACK(visit))
  {
    cur = bzla_node_real_addr(BZLA_POP_STACK(visit));

    if (bzla_hashint_table_contains(cone, cur->id)) continue;

    bzla_hashint_table_add(cone, cur->id);
    d = bzla_hashint_map_get(roots, cur->id);
    if (d && d->flag)
    {
      BZLA_PUSH_STACK(*false_roots, cur);
    }
    /* walk upwards through all parents */
    bzla_iter_parent_init(&it, cur);
    while (bzla_iter_parent_has_next(&it))
    {
      BZLA_PUSH_STACK(visit, bzla_iter_parent_next(&it));
    }
  }
  BZLA_RELEASE_STACK(visit);
}

/* Run the prop/sls local-search engine as a presolver.  Lazily creates the
 * engine in '*ls_slv' (owned by the caller, deleted in sat_fun_solver).
 * If the engine returns unknown and model sharing is enabled, inputs that
 * only feed satisfied constraints (separated from all unsatisfied ones)
 * have their partial-model values asserted to the bit-blasting engine.
 * Returns the local-search engine's result. */
static BzlaSolverResult
check_sat_prels(BzlaFunSolver *slv, BzlaSolver **ls_slv)
{
  assert(slv);

  size_t i;
  double start;
  BzlaSolver *preslv;
  BzlaNodePtrStack assertions, roots_true, roots_false;
  BzlaIntHashTable *visited;
  const BzlaBitVector *bv;
  BzlaNode *root, *cur, *real_cur, *bvconst, *assertion;
  BzlaPtrHashTableIterator it;
  Bzla *bzla;
  BzlaMemMgr *mm;
  BzlaSolverResult result;
  BzlaHashTableData *d;

  bzla = slv->bzla;
  assert(!bzla->inconsistent);
  mm = bzla->mm;

  start = bzla_util_time_stamp();
  if (!*ls_slv)
  {
    if (bzla_opt_get(bzla, BZLA_OPT_FUN_PREPROP))
    {
      *ls_slv = bzla_new_prop_solver(bzla);
    }
    else
    {
      *ls_slv = bzla_new_sls_solver(bzla);
    }
  }
  assert(*ls_slv);
  preslv = *ls_slv;
  /* temporarily swap in the presolver as the active engine */
  bzla->slv = preslv;
  result    = preslv->api.sat(preslv);
  /* print prop/sls solver statistics */
  preslv->api.print_stats(preslv);
  preslv->api.print_time_stats(preslv);
  /* reset */
  bzla->slv = (BzlaSolver *) slv;
  BZLA_MSG(bzla->msg, 1, "");
  BZLA_MSG(bzla->msg,
           1,
           "%s engine determined '%s'",
           preslv->kind == BZLA_PROP_SOLVER_KIND ? "PROP" : "SLS",
           result == BZLA_RESULT_SAT
               ? "sat"
               : (result == BZLA_RESULT_UNSAT ? "unsat" : "unknown"));

  /* Use the partial model of the prels engine and determine input assignments
   * that already satisfy constraints and separated from all other unsatisfied
   * constraints. Assert these assignments to the bit-blasting engine. */
  if (result == BZLA_RESULT_UNKNOWN
      && bzla_opt_get(bzla, BZLA_OPT_LS_SHARE_SAT) && !bzla_terminate(bzla)
      /* We support model sharing for QF_BV only. */
      && !bzla_get_sat_mgr(bzla)->inc_required)
  {
    BZLA_INIT_STACK(mm, roots_true);
    BZLA_INIT_STACK(mm, roots_false);
    BZLA_INIT_STACK(mm, assertions);

    bzla_iter_hashptr_init(&it, bzla->unsynthesized_constraints);
    bzla_iter_hashptr_queue(&it, bzla->synthesized_constraints);
    bzla_iter_hashptr_queue(&it, bzla->assumptions);

    /* Collect all constraints. */
    BzlaIntHashTable *roots = bzla_hashint_map_new(mm);
    while (bzla_iter_hashptr_has_next(&it))
    {
      root = bzla_iter_hashptr_next(&it);
      d    = bzla_hashint_map_add(roots, bzla_node_real_addr(root)->id);
      if (bzla_bv_is_true(bzla_model_get_bv(bzla, root)))
      {
        d->flag = true;
        BZLA_PUSH_STACK(roots_true, root);
      }
      else
      {
        d->flag = false;
        BZLA_PUSH_STACK(roots_false, root);
      }
    }

    /* Traverse each unsatisfied constraint down to the inputs and mark cone
     * of each input in function mark_cone. If a satisfied constraint is in
     * the cone of a traversed input, it is handled as an unsatisfied
     * constraints and therefore pushed onto the roots_false stack and
     * continue. */
    BzlaIntHashTable *cone = bzla_hashint_table_new(mm);
    visited                = bzla_hashint_table_new(mm);
    while (!BZLA_EMPTY_STACK(roots_false))
    {
      real_cur = bzla_node_real_addr(BZLA_POP_STACK(roots_false));
      if (bzla_hashint_table_contains(visited, real_cur->id)) continue;
      bzla_hashint_table_add(visited, real_cur->id);
      if (bzla_lsutils_is_leaf_node(real_cur))
      {
        mark_cone(bzla, real_cur, cone, roots, &roots_false);
      }
      else
      {
        for (i = 0; i < real_cur->arity; i++)
        {
          BZLA_PUSH_STACK(roots_false, real_cur->e[i]);
        }
      }
    }
    BZLA_RELEASE_STACK(roots_false);

    /* Collect all remaining inputs that are separated from the unsatisfied
     * constraints. */
    while (!BZLA_EMPTY_STACK(roots_true))
    {
      real_cur = bzla_node_real_addr(BZLA_POP_STACK(roots_true));
      if (bzla_hashint_table_contains(cone, real_cur->id)) continue;
      if (bzla_hashint_table_contains(visited, real_cur->id)) continue;
      bzla_hashint_table_add(visited, real_cur->id);
      if (bzla_lsutils_is_leaf_node(real_cur))
      {
        /* pin this input to its local-search model value */
        bv        = bzla_model_get_bv(bzla, real_cur);
        bvconst   = bzla_exp_bv_const(bzla, bv);
        assertion = bzla_exp_eq(bzla, real_cur, bvconst);
        bzla_node_release(bzla, bvconst);
        BZLA_PUSH_STACK(assertions, assertion);
      }
      else
      {
        for (i = 0; i < real_cur->arity; i++)
        {
          BZLA_PUSH_STACK(roots_true, real_cur->e[i]);
        }
      }
    }
    bzla_hashint_table_delete(visited);
    bzla_hashint_table_delete(cone);
    bzla_hashint_map_delete(roots);
    BZLA_RELEASE_STACK(roots_true);
    BZLA_FUN_SOLVER(bzla)->stats.prels_shared = BZLA_COUNT_STACK(assertions);
    BZLA_MSG(bzla->msg,
             1,
             "asserting %u model values",
             BZLA_COUNT_STACK(assertions));
    /* assert model values */
    for (i = 0; i < BZLA_COUNT_STACK(assertions); ++i)
    {
      cur = BZLA_PEEK_STACK(assertions, i);
      bzla_assert_exp(bzla, cur);
      bzla_node_release(bzla, cur);
    }
    BZLA_RELEASE_STACK(assertions);
  }
  slv->time.prels_sat += bzla_util_time_stamp() - start;
  return result;
}
/* Main check-sat loop of the function (lemmas-on-demand) solver:
 * optionally presolve with prop/sls, bit-blast the bv skeleton, then
 * repeatedly check function-layer consistency and refine with lemmas
 * until the abstraction is consistent, unsat, or a limit is hit. */
static BzlaSolverResult
sat_fun_solver(BzlaFunSolver *slv)
{
  assert(slv);
  assert(slv->kind == BZLA_FUN_SOLVER_KIND);
  assert(slv->bzla);
  assert(slv->bzla->slv == (BzlaSolver *) slv);

  uint32_t i;
  bool opt_prels, opt_prop_const_bits;
  BzlaSolverResult result;
  Bzla *bzla, *clone;
  BzlaNode *clone_root, *lemma;
  BzlaNodeMap *exp_map;
  BzlaIntHashTable *init_apps_cache;
  BzlaNodePtrStack init_apps;
  BzlaMemMgr *mm;
  BzlaSolver *ls_slv = 0;

  bzla = slv->bzla;
  mm   = bzla->mm;

  opt_prels = bzla_opt_get(bzla, BZLA_OPT_FUN_PREPROP)
              || bzla_opt_get(bzla, BZLA_OPT_FUN_PRESLS);
  opt_prop_const_bits = bzla_opt_get(bzla, BZLA_OPT_PROP_CONST_BITS) != 0;

  assert(!bzla->inconsistent);

  /* make initial applies in bv skeleton global in order to prevent
   * traversing the whole formula every refinement round */
  BZLA_INIT_STACK(mm, init_apps);
  init_apps_cache = bzla_hashint_table_new(mm);

  clone      = 0;
  clone_root = 0;
  exp_map    = 0;

  configure_sat_mgr(bzla);

  if (bzla_terminate(bzla))
  {
    result = BZLA_RESULT_UNKNOWN;
    goto DONE;
  }

  if (slv->assume_lemmas) reset_lemma_cache(slv);

  if (bzla->feqs->count > 0) add_function_disequality_witnesses(bzla);

  /* initialize dual prop clone */
  if (bzla_opt_get(bzla, BZLA_OPT_FUN_DUAL_PROP))
  {
    clone = new_exp_layer_clone_for_dual_prop(bzla, &exp_map, &clone_root);
  }

  /* snapshot all current constraints/assumptions into slv->constraints */
  BzlaPtrHashTableIterator it;
  bzla_iter_hashptr_init(&it, bzla->unsynthesized_constraints);
  bzla_iter_hashptr_queue(&it, bzla->synthesized_constraints);
  bzla_iter_hashptr_queue(&it, bzla->assumptions);
  while (bzla_iter_hashptr_has_next(&it))
  {
    BZLA_PUSH_STACK(slv->constraints,
                    bzla_node_copy(bzla, bzla_iter_hashptr_next(&it)));
  }

  /* CEGAR-style refinement loop */
  while (true)
  {
    result = BZLA_RESULT_UNKNOWN;

    if (bzla_terminate(bzla)
        || (slv->lod_limit > -1
            && slv->stats.lod_refinements >= (uint32_t) slv->lod_limit))
    {
      break;
    }

    if (opt_prels)
    {
      if (opt_prop_const_bits)
      {
        /* const-bits propagation needs synthesized constraints first */
        bzla_process_unsynthesized_constraints(bzla);
        if (bzla->found_constraint_false)
        {
          result = BZLA_RESULT_UNSAT;
          break;
        }
        assert(bzla->unsynthesized_constraints->count == 0);
        assert(bzla_dbg_check_all_hash_tables_proxy_free(bzla));
        assert(bzla_dbg_check_all_hash_tables_simp_free(bzla));
      }
      result = check_sat_prels(slv, &ls_slv);
    }

    if (result == BZLA_RESULT_UNKNOWN)
    {
      /* Word-blasting may add new constraints. Make sure that these also get
       * synthesized. */
      bzla_add_again_assumptions(bzla);
      bzla_process_unsynthesized_constraints(bzla);
      if (bzla->found_constraint_false)
      {
        result = BZLA_RESULT_UNSAT;
        break;
      }
      assert(bzla->unsynthesized_constraints->count == 0);
      assert(bzla_dbg_check_all_hash_tables_proxy_free(bzla));
      assert(bzla_dbg_check_all_hash_tables_simp_free(bzla));

      /* make SAT call on bv skeleton */
      result = timed_sat_sat(bzla, slv->sat_limit);

      /* Initialize new bit vector model, which will be constructed while
       * consistency checking. This also deletes the model from the previous
       * run. */
      bzla_model_init_bv(bzla, &bzla->bv_model);
    }

    if (result == BZLA_RESULT_UNSAT)
    {
      break;
    }
    else if (result == BZLA_RESULT_UNKNOWN)
    {
      /* unknown is only legitimate on resource-out / termination / dimacs
       * printing */
      assert(slv->sat_limit > -1 || bzla_terminate(bzla)
             || bzla_opt_get(bzla, BZLA_OPT_PRINT_DIMACS));
      break;
    }

    assert(result == BZLA_RESULT_SAT);

    /* pure bit-vector problem: skeleton model is already a full model */
    if (bzla->ufs->count == 0 && bzla->lambdas->count == 0) break;

    bzla_reset_functions_with_model(bzla);
    check_and_resolve_conflicts(
        bzla, clone, clone_root, exp_map, &init_apps, init_apps_cache);
    if (BZLA_EMPTY_STACK(slv->cur_lemmas)) break;
    slv->stats.refinement_iterations++;

    BZLALOG(1, "add %d lemma(s)", BZLA_COUNT_STACK(slv->cur_lemmas));
    /* add generated lemmas to formula */
    for (i = 0; i < BZLA_COUNT_STACK(slv->cur_lemmas); i++)
    {
      lemma = BZLA_PEEK_STACK(slv->cur_lemmas, i);
      assert(!bzla_node_is_simplified(lemma));
      // TODO (ma): use bzla_assert_exp?
      if (slv->assume_lemmas)
        bzla_assume_exp(bzla, lemma);
      else
        bzla_insert_unsynthesized_constraint(bzla, lemma);
      if (clone)
        add_lemma_to_dual_prop_clone(bzla, clone, &clone_root, lemma, exp_map);
      BZLA_PUSH_STACK(slv->constraints, bzla_node_copy(bzla, lemma));
    }
    BZLA_RESET_STACK(slv->cur_lemmas);

    if (bzla_opt_get(bzla, BZLA_OPT_VERBOSITY))
    {
      printf(
          "[bzlaslvfun] %d iterations, %d lemmas, %d ext. lemmas, "
          "vars %d, applies %d\n",
          slv->stats.refinement_iterations,
          slv->stats.lod_refinements,
          slv->stats.extensionality_lemmas,
          bzla->ops[BZLA_VAR_NODE].cur,
          bzla->ops[BZLA_APPLY_NODE].cur);
    }

    /* may be set via insert_unsythesized_constraint
     * in case generated lemma is false */
    if (bzla->inconsistent)
    {
      result = BZLA_RESULT_UNSAT;
      break;
    }
  }

DONE:
  BZLA_RELEASE_STACK(init_apps);
  bzla_hashint_table_delete(init_apps_cache);

  if (clone)
  {
    assert(exp_map);
    bzla_nodemap_delete(exp_map);
    bzla_node_release(clone, clone_root);
    bzla_delete(clone);
  }
  if (ls_slv)
  {
    /* presolver's delete callback expects itself installed as bzla->slv */
    bzla->slv = ls_slv;
    ls_slv->api.delet(ls_slv);
    bzla->slv = (BzlaSolver *) slv;
  }
  bzla->last_sat_result   = result;
  bzla->valid_assignments = 1;
  return result;
}

/*------------------------------------------------------------------------*/

/* Build the full model (bv and function models) after a SAT result. */
static void
generate_model_fun_solver(BzlaFunSolver *slv,
                          bool model_for_all_nodes,
                          bool reset)
{
  assert(slv);
  assert(slv->kind == BZLA_FUN_SOLVER_KIND);
  assert(slv->bzla);
  assert(slv->bzla->slv == (BzlaSolver *) slv);

  (void) reset;

  /* already created during check_and_resolve_conflicts */
  if (!slv->bzla->bv_model)
    bzla_model_init_bv(slv->bzla, &slv->bzla->bv_model);
  bzla_model_init_fun(slv->bzla, &slv->bzla->fun_model);

  bzla_model_generate(slv->bzla,
                      slv->bzla->bv_model,
                      slv->bzla->fun_model,
                      model_for_all_nodes);
}

/* Print the solver's statistics counters (presolver sharing, lemmas on
 * demand, evaluation/propagation counts, dual-prop vars/applies). */
static void
print_stats_fun_solver(BzlaFunSolver *slv)
{
  assert(slv);
  assert(slv->kind == BZLA_FUN_SOLVER_KIND);
  assert(slv->bzla);
  assert(slv->bzla->slv == (BzlaSolver *) slv);

  uint32_t i;
  Bzla *bzla;

  bzla = slv->bzla;

  if (!(slv = BZLA_FUN_SOLVER(bzla))) return;

  if (bzla_opt_get(bzla, BZLA_OPT_FUN_PREPROP)
      || bzla_opt_get(bzla, BZLA_OPT_FUN_PRESLS))
  {
    BZLA_MSG(bzla->msg, 1, "");
    BZLA_MSG(bzla->msg, 1, "preprop/presls statistics:");
    BZLA_MSG(bzla->msg,
             1,
             "%7d assignments shared with bit-blasting engine",
             slv->stats.prels_shared);
  }

  if (bzla->ufs->count || bzla->lambdas->count)
  {
    BZLA_MSG(bzla->msg, 1, "");
    BZLA_MSG(bzla->msg, 1, "lemmas on demand statistics:");
    BZLA_MSG(bzla->msg,
             1,
             "%4d refinement iterations",
             slv->stats.refinement_iterations);
    BZLA_MSG(bzla->msg, 1, "%4d LOD refinements", slv->stats.lod_refinements);
    if (slv->stats.lod_refinements)
    {
      BZLA_MSG(bzla->msg,
               1,
               " %4d function congruence conflicts",
               slv->stats.function_congruence_conflicts);
      BZLA_MSG(bzla->msg,
               1,
               " %4d beta reduction conflicts",
               slv->stats.beta_reduction_conflicts);
      BZLA_MSG(bzla->msg,
               1,
               " %4d extensionality lemmas",
               slv->stats.extensionality_lemmas);
      BZLA_MSG(bzla->msg,
               1,
               " %.1f average lemma size",
               BZLA_AVERAGE_UTIL(slv->stats.lemmas_size_sum,
                                 slv->stats.lod_refinements));
      /* histogram of lemma sizes; index 0 unused */
      for (i = 1; i < BZLA_SIZE_STACK(slv->stats.lemmas_size); i++)
      {
        if (!slv->stats.lemmas_size.start[i]) continue;
        BZLA_MSG(bzla->msg,
                 1,
                 " %4d lemmas of size %d",
                 slv->stats.lemmas_size.start[i],
                 i);
      }
    }
    BZLA_MSG(bzla->msg, 1, "");
    BZLA_MSG(bzla->msg,
             1,
             "%7lld expression evaluations",
             slv->stats.eval_exp_calls);
    BZLA_MSG(bzla->msg,
             1,
             "%7lld partial beta reductions",
             bzla->stats.betap_reduce_calls);
    BZLA_MSG(bzla->msg, 1, "%7lld propagations", slv->stats.propagations);
    BZLA_MSG(
        bzla->msg, 1, "%7lld propagations down", slv->stats.propagations_down);
  }

  if (bzla_opt_get(bzla, BZLA_OPT_FUN_DUAL_PROP))
  {
    BZLA_MSG(bzla->msg,
             1,
             "%d/%d dual prop. vars (failed/assumed)",
             slv->stats.dp_failed_vars,
             slv->stats.dp_assumed_vars);
    BZLA_MSG(bzla->msg,
             1,
             "%d/%d dual prop. applies (failed/assumed)",
             slv->stats.dp_failed_applies,
             slv->stats.dp_assumed_applies);
  }
}

/* Print the solver's accumulated wall-clock time statistics. */
static void
print_time_stats_fun_solver(BzlaFunSolver *slv)
{
  assert(slv);
  assert(slv->kind == BZLA_FUN_SOLVER_KIND);
  assert(slv->bzla);
  assert(slv->bzla->slv == (BzlaSolver *) slv);

  Bzla *bzla;

  bzla = slv->bzla;

  BZLA_MSG(bzla->msg, 1, "");
  BZLA_MSG(bzla->msg,
           1,
           "%.2f seconds consistency checking",
           slv->time.check_consistency);
  BZLA_MSG(bzla->msg,
           1,
           " %.2f seconds initial applies search",
           slv->time.search_init_apps);
  if (bzla_opt_get(bzla, BZLA_OPT_FUN_JUST)
      || bzla_opt_get(bzla, BZLA_OPT_FUN_DUAL_PROP))
  {
    BZLA_MSG(bzla->msg,
             1,
             " %.2f seconds compute scores",
             slv->time.search_init_apps_compute_scores);
    BZLA_MSG(bzla->msg,
             1,
             " %.2f seconds merge applies",
             slv->time.search_init_apps_compute_scores_merge_applies);
  }
  if (bzla_opt_get(bzla, BZLA_OPT_FUN_DUAL_PROP))
  {
    BZLA_MSG(bzla->msg,
             1,
             " %.2f seconds cloning",
             slv->time.search_init_apps_cloning);
    BZLA_MSG(bzla->msg,
             1,
             " %.2f seconds SAT solving",
             slv->time.search_init_apps_sat);
    BZLA_MSG(bzla->msg,
             1,
             " %.2f seconds collecting bv vars and apps",
             slv->time.search_init_apps_collect_var_apps);
    BZLA_MSG(bzla->msg,
             1,
             " %.2f seconds collecting initial applies (FA)",
             slv->time.search_init_apps_collect_fa);
    BZLA_MSG(bzla->msg,
             1,
             " %.2f seconds cone traversal",
             slv->time.search_init_apps_collect_fa_cone);
  }
  BZLA_MSG(bzla->msg, 1, " %.2f seconds propagation", slv->time.prop);
  BZLA_MSG(
      bzla->msg, 1, " %.2f seconds expression evaluation", slv->time.eval);
  BZLA_MSG(bzla->msg,
           1,
           " %.2f seconds partial beta reduction",
           bzla->time.betap);
  BZLA_MSG(
      bzla->msg, 1, " %.2f seconds lemma generation", slv->time.lemma_gen);
  BZLA_MSG(bzla->msg,
           1,
           " %.2f seconds propagation apply search",
           slv->time.find_prop_app);
  BZLA_MSG(bzla->msg,
           1,
           " %.2f seconds conflict apply search",
           slv->time.find_conf_app);
  if (bzla->feqs->count > 0)
    BZLA_MSG(bzla->msg,
             1,
             " %.2f seconds check extensionality",
             slv->time.check_extensionality);
  BZLA_MSG(bzla->msg,
           1,
           " %.2f seconds propagation cleanup",
           slv->time.prop_cleanup);
  BZLA_MSG(bzla->msg, 1, "");
  if ((bzla_opt_get(bzla, BZLA_OPT_FUN_PREPROP)
       || bzla_opt_get(bzla, BZLA_OPT_FUN_PRESLS))
      && bzla_opt_get(bzla, BZLA_OPT_LS_SHARE_SAT))
  {
    BZLA_MSG(bzla->msg,
             1,
             "%.2f seconds for preprop/presls SAT check with partial "
             "assignment",
             slv->time.prels_sat);
  }
  BZLA_MSG(bzla->msg, 1, "%.2f seconds in pure SAT solving", slv->time.sat);
  BZLA_MSG(bzla->msg, 1, "");
}

/* Delegate model printing to the shared AUFBVFP model printer. */
static void
print_model_fun_solver(BzlaFunSolver *slv, const char *format, FILE *file)
{
  bzla_print_model_aufbvfp(slv->bzla, format, file);
}

/* Allocate and wire up a new function-solver instance (API callbacks,
 * limits, lemma cache, statistics stacks).  Caller owns the result. */
BzlaSolver *
bzla_new_fun_solver(Bzla *bzla)
{
  assert(bzla);

  BzlaFunSolver *slv;

  BZLA_CNEW(bzla->mm, slv);

  slv->kind      = BZLA_FUN_SOLVER_KIND;
  slv->bzla      = bzla;
  slv->api.clone = (BzlaSolverClone) clone_fun_solver;
  slv->api.delet = (BzlaSolverDelete) delete_fun_solver;
  slv->api.sat   = (BzlaSolverSat) sat_fun_solver;
  slv->api.generate_model =
      (BzlaSolverGenerateModel) generate_model_fun_solver;
  slv->api.print_stats = (BzlaSolverPrintStats) print_stats_fun_solver;
  slv->api.print_time_stats =
      (BzlaSolverPrintTimeStats) print_time_stats_fun_solver;
  slv->api.print_model = (BzlaSolverPrintModel) print_model_fun_solver;

  /* -1 disables the respective limit */
  slv->lod_limit = -1;
  slv->sat_limit = -1;

  slv->lemmas = bzla_hashptr_table_new(bzla->mm,
                                       (BzlaHashPtr) bzla_node_hash_by_id,
                                       (BzlaCmpPtr) bzla_node_compare_by_id);
  BZLA_INIT_STACK(bzla->mm, slv->cur_lemmas);
  BZLA_INIT_STACK(bzla->mm, slv->constraints);
  BZLA_INIT_STACK(bzla->mm, slv->stats.lemmas_size);

  BZLA_MSG(bzla->msg, 1, "enabled core engine");

  return (BzlaSolver *) slv;
}

// TODO (ma): this is just a fix for now, this should be moved elsewhere
/* Evaluate bit-vector expression 'exp' under the current bv model using an
 * explicit work stack (no recursion).  Caller owns the returned bit vector
 * and must free it with bzla_bv_free. */
BzlaBitVector *
bzla_eval_exp(Bzla *bzla, BzlaNode *exp)
{
  assert(bzla);
  assert(bzla->slv);
  assert(bzla->slv->kind == BZLA_FUN_SOLVER_KIND);
  assert(exp);
  assert(bzla->bv_model);

  uint32_t i;
  double start;
  BzlaMemMgr *mm;
  BzlaNodePtrStack work_stack;
  BzlaVoidPtrStack arg_stack;
  BzlaNode *cur, *real_cur, *next;
  BzlaPtrHashTable *cache;
  BzlaPtrHashBucket *b;
  BzlaPtrHashTableIterator it;
  BzlaBitVector *result = 0, *inv_result, **e;
  BzlaFunSolver *slv;
  BzlaIntHashTable *mark;
  BzlaHashTableData *d;

  start = bzla_util_time_stamp();
  mm    = bzla->mm;
  slv   = BZLA_FUN_SOLVER(bzla);
  slv->stats.eval_exp_calls++;

  BZLA_INIT_STACK(mm, work_stack);
  BZLA_INIT_STACK(mm, arg_stack);
  cache = bzla_hashptr_table_new(mm,
                                 (BzlaHashPtr) bzla_node_hash_by_id,
                                 (BzlaCmpPtr) bzla_node_compare_by_id);
  /* mark: 0 = children pushed, 1 = result cached */
  mark = bzla_hashint_map_new(mm);

  BZLA_PUSH_STACK(work_stack, exp);
  while (!BZLA_EMPTY_STACK(work_stack))
  {
    cur      = bzla_node_get_simplified(bzla, BZLA_POP_STACK(work_stack));
    real_cur = bzla_node_real_addr(cur);

    d = bzla_hashint_map_get(mark, real_cur->id);
    if (!d)
    {
      /* first visit: leaves yield their assignment directly */
      if (bzla_node_is_bv_var(real_cur) || bzla_node_is_apply(real_cur)
          || bzla_node_is_fun_eq(real_cur)
          || has_bv_assignment(bzla, real_cur))
      {
        result = get_bv_assignment(bzla, real_cur);
        goto EVAL_EXP_PUSH_RESULT;
      }
      else if (bzla_node_is_bv_const(real_cur))
      {
        result = bzla_bv_copy(mm, bzla_node_bv_const_get_bits(real_cur));
        goto EVAL_EXP_PUSH_RESULT;
      }
      /* Word-blast FP nodes and do evaluation on BV representation */
      else if (bzla_node_fp_needs_word_blast(bzla, real_cur))
      {
        next = bzla_fp_word_blast(bzla, real_cur);
        BZLA_PUSH_STACK(work_stack, next);
        continue;
      }
      /* substitute param with its assignment */
      else if (bzla_node_is_param(real_cur))
      {
        next = bzla_node_param_get_assigned_exp(real_cur);
        assert(next);
        if (bzla_node_is_inverted(cur)) next = bzla_node_invert(next);
        BZLA_PUSH_STACK(work_stack, next);
        continue;
      }

      /* internal node: revisit after children are evaluated */
      BZLA_PUSH_STACK(work_stack, cur);
      bzla_hashint_map_add(mark, real_cur->id);

      for (i = 0; i < real_cur->arity; i++)
        BZLA_PUSH_STACK(work_stack, real_cur->e[i]);
    }
    else if (d->as_int == 0)
    {
      /* second visit: children results are on top of arg_stack */
      assert(!bzla_node_is_param(real_cur));
      assert(!bzla_node_is_args(real_cur));
      assert(!bzla_node_is_fun(real_cur));
      assert(real_cur->arity >= 1);
      assert(real_cur->arity <= BZLA_NODE_MAX_CHILDREN);
      assert(real_cur->arity <= BZLA_COUNT_STACK(arg_stack));

      d->as_int = 1;
      arg_stack.top -= real_cur->arity;
      e = (BzlaBitVector **) arg_stack.top; /* arguments in reverse order */

      switch (real_cur->kind)
      {
        case BZLA_BV_SLICE_NODE:
          result = bzla_bv_slice(mm,
                                 e[0],
                                 bzla_node_bv_slice_get_upper(real_cur),
                                 bzla_node_bv_slice_get_lower(real_cur));
          bzla_bv_free(mm, e[0]);
          break;
        case BZLA_BV_AND_NODE:
          result = bzla_bv_and(mm, e[1], e[0]);
          bzla_bv_free(mm, e[0]);
          bzla_bv_free(mm, e[1]);
          break;
        case BZLA_BV_EQ_NODE:
          result = bzla_bv_eq(mm, e[1], e[0]);
          bzla_bv_free(mm, e[0]);
          bzla_bv_free(mm, e[1]);
          break;
        case BZLA_BV_ADD_NODE:
          result = bzla_bv_add(mm, e[1], e[0]);
          bzla_bv_free(mm, e[0]);
          bzla_bv_free(mm, e[1]);
          break;
        case BZLA_BV_MUL_NODE:
          result = bzla_bv_mul(mm, e[1], e[0]);
          bzla_bv_free(mm, e[0]);
          bzla_bv_free(mm, e[1]);
          break;
        case BZLA_BV_ULT_NODE:
          result = bzla_bv_ult(mm, e[1], e[0]);
          bzla_bv_free(mm, e[0]);
          bzla_bv_free(mm, e[1]);
          break;
        case BZLA_BV_SLT_NODE:
          result = bzla_bv_slt(mm, e[1], e[0]);
          bzla_bv_free(mm, e[0]);
          bzla_bv_free(mm, e[1]);
          break;
        case BZLA_BV_SLL_NODE:
          result = bzla_bv_sll(mm, e[1], e[0]);
          bzla_bv_free(mm, e[0]);
          bzla_bv_free(mm, e[1]);
          break;
        case BZLA_BV_SRL_NODE:
          result = bzla_bv_srl(mm, e[1], e[0]);
          bzla_bv_free(mm, e[0]);
          bzla_bv_free(mm, e[1]);
          break;
        case BZLA_BV_UDIV_NODE:
          result = bzla_bv_udiv(mm, e[1], e[0]);
          bzla_bv_free(mm, e[0]);
          bzla_bv_free(mm, e[1]);
          break;
        case BZLA_BV_UREM_NODE:
          result = bzla_bv_urem(mm, e[1], e[0]);
          bzla_bv_free(mm, e[0]);
          bzla_bv_free(mm, e[1]);
          break;
        case BZLA_BV_CONCAT_NODE:
          result = bzla_bv_concat(mm, e[1], e[0]);
          bzla_bv_free(mm, e[0]);
          bzla_bv_free(mm, e[1]);
          break;
        case BZLA_COND_NODE:
          /* e[2] is the condition (children were pushed in reverse) */
          if (bzla_bv_is_true(e[2]))
            result = bzla_bv_copy(mm, e[1]);
          else
            result = bzla_bv_copy(mm, e[0]);
          bzla_bv_free(mm, e[0]);
          bzla_bv_free(mm, e[1]);
          bzla_bv_free(mm, e[2]);
          break;
        default:
          BZLALOG(1, " *** %s", bzla_util_node2string(real_cur));
          /* should be unreachable */
          assert(0);
      }

      assert(!bzla_hashptr_table_get(cache, real_cur));
      bzla_hashptr_table_add(cache, real_cur)->data.as_ptr =
          bzla_bv_copy(mm, result);

    EVAL_EXP_PUSH_RESULT:
      /* apply the node's sign (inversion) before pushing the result */
      if (bzla_node_is_inverted(cur))
      {
        inv_result = bzla_bv_not(mm, result);
        bzla_bv_free(mm, result);
        result = inv_result;
      }

      BZLA_PUSH_STACK(arg_stack, result);
    }
    else
    {
      /* already evaluated: reuse cached result */
      assert(d->as_int == 1);
      b = bzla_hashptr_table_get(cache, real_cur);
      assert(b);
      result = bzla_bv_copy(mm, (BzlaBitVector *) b->data.as_ptr);
      goto EVAL_EXP_PUSH_RESULT;
    }
  }
  assert(BZLA_COUNT_STACK(arg_stack) == 1);
  result = BZLA_POP_STACK(arg_stack);
  assert(result);

  while (!BZLA_EMPTY_STACK(arg_stack))
  {
    inv_result = BZLA_POP_STACK(arg_stack);
    bzla_bv_free(mm, inv_result);
  }

  bzla_iter_hashptr_init(&it, cache);
  while (bzla_iter_hashptr_has_next(&it))
  {
    bzla_bv_free(mm, (BzlaBitVector *) it.bucket->data.as_ptr);
    real_cur = bzla_iter_hashptr_next(&it);
  }

  BZLA_RELEASE_STACK(work_stack);
  BZLA_RELEASE_STACK(arg_stack);
  bzla_hashptr_table_delete(cache);
  bzla_hashint_map_delete(mark);

  // BZLALOG ("%s: %s '%s'", __FUNCTION__, bzla_util_node2string (exp),
  // result);

  slv->time.eval += bzla_util_time_stamp() - start;

  return result;
}
/*** * Bitwuzla: Satisfiability Modulo Theories (SMT) solver. * * This file is part of Bitwuzla. * * Copyright (C) 2007-2022 by the authors listed in the AUTHORS file. * * See COPYING for more information on using this software. */
data.ml
(* Genesis-protocol data: the commands that may appear in a genesis block,
   the activator public key stored in the context, and context
   initialization/versioning helpers. *)

module Command = struct

  (* A genesis block contains exactly one command: either activate a
     protocol on the main chain, or spawn a testchain for a protocol. *)
  type t =
    (* Activate a protocol *)
    | Activate of {
        protocol: Protocol_hash.t ;
        fitness: Fitness.t ;
        protocol_parameters : MBytes.t ;
      }
    (* Activate a protocol as a testchain *)
    | Activate_testchain of {
        protocol: Protocol_hash.t ;
        delay: Int64.t ;
      }

  (* Wraps [args] in an object that also carries a constant ["command"]
     field equal to [name]; used to tag each union case in JSON. *)
  let mk_case name args =
    let open Data_encoding in
    conv
      (fun o -> ((), o))
      (fun ((), o) -> o)
      (merge_objs
         (obj1 (req "command" (constant name)))
         args)

  (* Binary/JSON encoding of [t]; binary cases are discriminated by a
     one-byte tag (0 = Activate, 1 = Activate_testchain). *)
  let encoding =
    let open Data_encoding in
    union ~tag_size:`Uint8 [
      case (Tag 0)
        ~title:"Activate"
        (mk_case "activate"
           (obj3
              (req "hash" Protocol_hash.encoding)
              (req "fitness" Fitness.encoding)
              (req "protocol_parameters" Variable.bytes)
           ))
        (function
          | Activate { protocol ; fitness ; protocol_parameters } ->
              Some (protocol, fitness, protocol_parameters)
          | _ -> None)
        (fun (protocol, fitness, protocol_parameters) ->
           Activate { protocol ; fitness ; protocol_parameters }) ;
      case (Tag 1)
        ~title:"Activate_testchain"
        (mk_case "activate_testchain"
           (obj2
              (req "hash" Protocol_hash.encoding)
              (req "validity_time" int64)))
        (function
          | Activate_testchain { protocol ; delay } ->
              Some (protocol, delay)
          | _ -> None)
        (fun (protocol, delay) -> Activate_testchain { protocol ; delay }) ;
    ]

  (* A command together with the activator's signature over it. *)
  let signed_encoding =
    let open Data_encoding in
    obj2
      (req "content" encoding)
      (req "signature" Signature.encoding)

  (* Serializes a shell header and a command into the byte sequence that
     is signed by the activator key. *)
  let forge shell command =
    Data_encoding.Binary.to_bytes_exn
      (Data_encoding.tup2 Block_header.shell_header_encoding encoding)
      (shell, command)

end

module Pubkey = struct

  (* Context key under which the activator public key is stored. *)
  let pubkey_key = ["genesis_key"]

  (* Fallback key used when the context has no (decodable) stored key. *)
  let default =
    Signature.Public_key.of_b58check_exn
      "edpkvVCdQtDJHPnkmfRZuuHWKzFetH9N9nGP8F7zkwM2BJpjbvAU1N"

  (* Reads the activator key from the context; falls back to [default]
     both when the key is absent and when the stored bytes do not decode. *)
  let get_pubkey ctxt =
    Context.get ctxt pubkey_key >>= function
    | None -> Lwt.return default
    | Some b ->
        match
          Data_encoding.Binary.of_bytes Signature.Public_key.encoding b
        with
        | None -> Lwt.return default
        | Some pk -> Lwt.return pk

  (* Stores [v] as the activator key in the context. *)
  let set_pubkey ctxt v =
    Context.set ctxt pubkey_key @@
    Data_encoding.Binary.to_bytes_exn Signature.Public_key.encoding v

  (* JSON shape of the sandbox parameters: an optional genesis pubkey
     next to arbitrary (ignored) extra fields. *)
  let sandbox_encoding =
    let open Data_encoding in
    merge_objs
      (obj1 (req "genesis_pubkey" Signature.Public_key.encoding))
      Data_encoding.unit

  (* Overrides the default activator key from sandbox parameters.
     Malformed JSON is deliberately ignored (context left unchanged). *)
  let may_change_default ctxt json =
    match Data_encoding.Json.destruct sandbox_encoding json with
    | exception _ -> Lwt.return ctxt
    | (pubkey, ()) ->
        set_pubkey ctxt pubkey >>= fun ctxt ->
        Lwt.return ctxt

end

module Init = struct

  type error += Incompatible_protocol_version

  let version_key = ["version"]

  (* This key should always be populated for every version of the
     protocol.  Its absence means that the context is empty. *)
  let version_value = "genesis"

  (* Fails unless the context was initialized with this protocol version. *)
  let check_inited ctxt =
    Context.get ctxt version_key >>= function
    | None -> failwith "Internal error: uninitialized context."
    | Some version ->
        if Compare.String.(version_value <> MBytes.to_string version) then
          failwith "Internal error: incompatible protocol version" ;
        return_unit

  (* Writes the version marker; must only be called on a fresh context. *)
  let tag_first_block ctxt =
    Context.get ctxt version_key >>= function
    | None ->
        Context.set ctxt version_key
          (MBytes.of_string version_value) >>= fun ctxt ->
        return ctxt
    | Some _version ->
        failwith "Internal error: previously initialized context." ;

end
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. <contact@tezos.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
alias.ml
open Lens_utility

(* An alias is a column/table name, represented as a plain string. *)
type t = string [@@deriving show, eq, sexp]

module Map = struct
  include String.Map

  (* Labelled, option-returning lookup (thin wrapper over [find_opt]). *)
  let find t ~key = find_opt key t
end

module Set = struct
  module Base = String.Set
  include Base

  (* Serialize the set as the s-expression of its sorted element list. *)
  let sexp_of_t v = elements v |> sexp_of_list sexp_of_t

  let t_of_sexp v =
    let l = list_of_sexp t_of_sexp v in
    of_list l

  (* Print the elements space-separated, without braces. *)
  let pp_pretty fmt cs =
    Format.fprintf fmt "%a"
      (Format.pp_print_list
         ~pp_sep:(Format.pp_constant " ")
         Format.pp_print_string)
      (elements cs)

  module List = struct
    type elt = Base.t
    type t = elt list

    (* Internal control-flow exception; carries the offending overlap. *)
    exception Not_disjoint of Base.t

    (* [is_disjoint s] is [Ok ()] when the sets in [s] are pairwise
       disjoint, and [Error overlap] with the first non-empty
       intersection found otherwise. *)
    let is_disjoint s =
      try
        List.fold_right
          (fun e acc ->
            let int = Base.inter e acc in
            if Base.is_empty int |> not then raise (Not_disjoint int);
            Base.union e acc)
          s Base.empty
        |> ignore;
        Result.return ()
      with
      | Not_disjoint t -> Result.error t
  end

  module Set = struct
    include Set.Make (String.Set)

    (* Disjointness over a set of sets, via the list check above. *)
    let is_disjoint s = elements s |> List.is_disjoint
  end
end
mul_interpolate.c
#include "flint.h"
#include "nmod_poly.h"
#include "nmod_poly_mat.h"

/* Sets C = A * B using evaluation/interpolation: every entry of A and B is
 * evaluated at the len = len(A) + len(B) - 1 points 0, 1, ..., len - 1,
 * the resulting scalar matrices are multiplied pointwise, and the entries
 * of C are interpolated back from the products.
 *
 * Requires the modulus to be >= len (enough distinct evaluation points);
 * aborts otherwise.  Aliasing of C with A or B is safe because C's entries
 * are only written after A and B have been fully evaluated.
 *
 * Fix vs. previous revision: the scratch vector `uu` was allocated and
 * freed but never used anywhere in the function; the dead allocation has
 * been removed. */
void
nmod_poly_mat_mul_interpolate(nmod_poly_mat_t C, const nmod_poly_mat_t A,
                              const nmod_poly_mat_t B)
{
    slong i, j, k;
    slong A_len, B_len, len;
    nmod_mat_t *C_mod, *A_mod, *B_mod;
    mp_ptr xs;
    mp_ptr tt;           /* scratch vector for evaluations/coefficients */
    mp_ptr * tree;       /* subproduct tree for fast eval/interpolation */
    mp_ptr weights;      /* barycentric interpolation weights */
    nmod_t mod;

    if (B->r == 0)
    {
        nmod_poly_mat_zero(C);
        return;
    }

    A_len = nmod_poly_mat_max_length(A);
    B_len = nmod_poly_mat_max_length(B);

    /* Either operand identically zero => product is zero. */
    if (A_len == 0 || B_len == 0)
    {
        nmod_poly_mat_zero(C);
        return;
    }

    /* Maximum length of any entry of the product. */
    len = A_len + B_len - 1;

    nmod_init(&mod, nmod_poly_mat_modulus(A));

    /* Need len distinct points 0..len-1, hence modulus >= len. */
    if (mod.n < len)
    {
        flint_printf("Exception (nmod_poly_mat_mul_interpolate). \n"
                     "Characteristic is too small.\n");
        flint_abort();
    }

    xs = _nmod_vec_init(len);
    tt = _nmod_vec_init(len);
    weights = _nmod_vec_init(len);

    A_mod = flint_malloc(sizeof(nmod_mat_t) * len);
    B_mod = flint_malloc(sizeof(nmod_mat_t) * len);
    C_mod = flint_malloc(sizeof(nmod_mat_t) * len);

    for (i = 0; i < len; i++)
    {
        xs[i] = i;  /* evaluation points 0, 1, ..., len - 1 */
        nmod_mat_init(A_mod[i], A->r, A->c, mod.n);
        nmod_mat_init(B_mod[i], B->r, B->c, mod.n);
        nmod_mat_init(C_mod[i], C->r, C->c, mod.n);
    }

    tree = _nmod_poly_tree_alloc(len);
    _nmod_poly_tree_build(tree, xs, len, mod);
    _nmod_poly_interpolation_weights(weights, tree, len, mod);

    /* Evaluate each entry of A at all points. */
    for (i = 0; i < A->r; i++)
    {
        for (j = 0; j < A->c; j++)
        {
            _nmod_poly_evaluate_nmod_vec_fast_precomp(tt,
                nmod_poly_mat_entry(A, i, j)->coeffs,
                nmod_poly_mat_entry(A, i, j)->length, tree, len, mod);

            for (k = 0; k < len; k++)
                A_mod[k]->rows[i][j] = tt[k];
        }
    }

    /* Evaluate each entry of B at all points. */
    for (i = 0; i < B->r; i++)
    {
        for (j = 0; j < B->c; j++)
        {
            _nmod_poly_evaluate_nmod_vec_fast_precomp(tt,
                nmod_poly_mat_entry(B, i, j)->coeffs,
                nmod_poly_mat_entry(B, i, j)->length, tree, len, mod);

            for (k = 0; k < len; k++)
                B_mod[k]->rows[i][j] = tt[k];
        }
    }

    /* Pointwise products over the evaluation points. */
    for (i = 0; i < len; i++)
        nmod_mat_mul(C_mod[i], A_mod[i], B_mod[i]);

    /* Interpolate each entry of C from its values at the points. */
    for (i = 0; i < C->r; i++)
    {
        for (j = 0; j < C->c; j++)
        {
            nmod_poly_struct * poly;

            for (k = 0; k < len; k++)
                tt[k] = C_mod[k]->rows[i][j];

            poly = nmod_poly_mat_entry(C, i, j);
            nmod_poly_fit_length(poly, len);
            _nmod_poly_interpolate_nmod_vec_fast_precomp(poly->coeffs,
                tt, tree, weights, len, mod);
            poly->length = len;
            _nmod_poly_normalise(poly);  /* strip leading zero coefficients */
        }
    }

    _nmod_poly_tree_free(tree, len);

    for (i = 0; i < len; i++)
    {
        nmod_mat_clear(A_mod[i]);
        nmod_mat_clear(B_mod[i]);
        nmod_mat_clear(C_mod[i]);
    }

    flint_free(A_mod);
    flint_free(B_mod);
    flint_free(C_mod);

    _nmod_vec_clear(xs);
    _nmod_vec_clear(tt);
    _nmod_vec_clear(weights);
}
/* Copyright (C) 2012 Fredrik Johansson This file is part of FLINT. FLINT is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License (LGPL) as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. See <https://www.gnu.org/licenses/>. */
tag_only.ml
(** @deprecated *) open Module (** abc @deprecated *) open Module (** @author A *) open Module (** @inline *) open Module (** @inline *) include Abc (** @inline *) include struct type t end (** @inline *) include (Module : Type) (** @inline *) module A = B (** @inline *) module A : sig type t end = struct type t end (** @inline *) module rec A : sig type t end = struct type t end (** @author B *) and B : sig type t end = struct type t end (** @deprecated abc *) module type A = B (** @deprecated abc *) module type A = sig type t end (** @open *) module A : sig type t end = B (** @deprecated *) open Module.With_veryyyyyy_loooooooooooooooooooooooong_naaaaaaaaaaaaaaaaame (** @deprecated *) include Module.With_very_loooooooooooooooooooooooong_naaaaaaaaaaaaaaaaame (** @deprecated *) module A = Module.With_very_loooooooooooooooooooooooong_naaaaaaaaaaaaaaaaame (** @deprecated *) type t = T (** @deprecated *) type t = t (** @deprecated *) let a = b (** @deprecated *) type t = t (** @deprecated *) class b = object (** @deprecated *) method f = 0 (** @deprecated *) inherit a (** @deprecated *) val x = 1 (** @deprecated *) constraint 'a = [> ] (** @deprecated *) initializer do_init () end [@@@ocamlformat "doc-comments-tag-only=fit"] open Module (** @deprecated *) (** abc @deprecated *) open Module open Module (** @author A *) open Module (** @inline *) include Abc (** @inline *) (** @inline *) include struct type t end include (Module : Type) (** @inline *) module A = B (** @inline *) (** @inline *) module A : sig type t end = struct type t end (** @inline *) module rec A : sig type t end = struct type t end (** @author B *) and B : sig type t end = struct type t end module type A = B (** @deprecated abc *) (** @deprecated abc *) module type A = sig type t end (** @open *) module A : sig type t end = B open Module.With_veryyyyyy_loooooooooooooooooooooooong_naaaaaaaaaaaaaaaaame (** @deprecated *) include Module.With_very_loooooooooooooooooooooooong_naaaaaaaaaaaaaaaaame (** 
@deprecated *) module A = Module.With_very_loooooooooooooooooooooooong_naaaaaaaaaaaaaaaaame (** @deprecated *) (** @deprecated *) type t = T type t = t (** @deprecated *) (** @deprecated *) let a = b (** @deprecated *) type t = t (** @deprecated *) class b = object method f = 0 (** @deprecated *) inherit a (** @deprecated *) val x = 1 (** @deprecated *) constraint 'a = [> ] (** @deprecated *) initializer do_init () (** @deprecated *) end
(** @deprecated *) open Module
type_description.ml
(* Ctypes type-description functor: intentionally empty — no C types are
   bound yet.  Declarations of C types/constants via [F] go here. *)
module Types (F : Ctypes.TYPE) = struct end
web_session.ml
(* Tests for the Sihl signed-cookie session middleware.  The literal
   cookie values below are HMAC signatures produced with the default test
   secret — do not edit them by hand. *)

open Alcotest_lwt
open Lwt.Syntax

(* An unsigned session cookie must be rejected silently: the middleware
   starts a fresh session and re-issues a signed empty cookie. *)
let unsigned_session_cookie _ () =
  let middleware = Sihl.Web.Middleware.session () in
  let req =
    Opium.Request.get ""
    (* default empty session with default test secret *)
    |> Opium.Request.add_cookie ("_session", "{}")
  in
  let handler _ =
    (* We don't set any session values *)
    Lwt.return @@ Opium.Response.of_plain_text ""
  in
  let* response = Rock.Middleware.apply middleware handler req in
  let cookie = Opium.Response.cookies response |> List.hd in
  let cookie_value = cookie.Opium.Cookie.value in
  (* Unsigned cookie fails silently, new session is started *)
  Alcotest.(
    check
      (pair string string)
      "responds with empty cookie"
      ("_session", "{}.byiLJwVqMzg39fb251SaoN+19fo=")
      cookie_value);
  Lwt.return ()
;;

(* A cookie whose signature does not match must likewise be discarded and
   replaced by a fresh, correctly signed empty session. *)
let invalid_session_cookie_signature _ () =
  let middleware = Sihl.Web.Middleware.session () in
  let req =
    Opium.Request.get ""
    (* default empty session with default test secret *)
    |> Opium.Request.add_cookie ("_session", "{}.ayiLJwVqMzg39fb251SaoN+19fo=")
  in
  let handler _ =
    (* We don't set any session values *)
    Lwt.return @@ Opium.Response.of_plain_text ""
  in
  let* response = Rock.Middleware.apply middleware handler req in
  let cookie = Opium.Response.cookies response |> List.hd in
  let cookie_value = cookie.Opium.Cookie.value in
  (* Invalid signature fails silently, new session is started *)
  Alcotest.(
    check
      (pair string string)
      "responds with empty cookie"
      ("_session", "{}.byiLJwVqMzg39fb251SaoN+19fo=")
      cookie_value);
  Lwt.return ()
;;

(* A cookie whose payload is not valid session data must be discarded and
   replaced by a fresh empty session. *)
let invalid_session_cookie_value _ () =
  let middleware = Sihl.Web.Middleware.session () in
  let req =
    Opium.Request.get ""
    (* default empty session with default test secret *)
    |> Opium.Request.add_cookie
         ("_session", "invalid content.byiLJwVqMzg39fb251SaoN+19fo=")
  in
  let handler _ =
    (* We don't set any session values *)
    Lwt.return @@ Opium.Response.of_plain_text ""
  in
  let* response = Rock.Middleware.apply middleware handler req in
  let cookie = Opium.Response.cookies response |> List.hd in
  let cookie_value = cookie.Opium.Cookie.value in
  (* Invalid cookie value fails silently, new session is started *)
  Alcotest.(
    check
      (pair string string)
      "responds with empty cookie"
      ("_session", "{}.byiLJwVqMzg39fb251SaoN+19fo=")
      cookie_value);
  Lwt.return ()
;;

(* If the request already carries a valid empty session cookie and the
   handler does not touch the session, no Set-Cookie is emitted. *)
let no_empty_cookie_set_if_already_present _ () =
  let middleware = Sihl.Web.Middleware.session () in
  let req =
    Opium.Request.get ""
    (* default empty session with default test secret *)
    |> Opium.Request.add_cookie ("_session", "{}.byiLJwVqMzg39fb251SaoN+19fo=")
  in
  let handler _ =
    (* We don't set any session values *)
    Lwt.return @@ Opium.Response.of_plain_text ""
  in
  let* response = Rock.Middleware.apply middleware handler req in
  let cookies = Opium.Response.cookies response in
  Alcotest.(check int "responds without cookie" 0 (List.length cookies));
  Lwt.return ()
;;

(* A request without any session cookie gets exactly one signed empty
   session cookie in the response. *)
let empty_cookie_set _ () =
  let middleware = Sihl.Web.Middleware.session () in
  let req = Opium.Request.get "" in
  let handler _ =
    (* We don't set any session values *)
    Lwt.return @@ Opium.Response.of_plain_text ""
  in
  let* response = Rock.Middleware.apply middleware handler req in
  let cookies = Opium.Response.cookies response in
  Alcotest.(check int "responds with one cookie" 1 (List.length cookies));
  let cookie = Opium.Response.cookie "_session" response |> Option.get in
  Alcotest.(
    check
      (pair string string)
      "has empty content"
      (* default empty session with default test secret *)
      ("_session", "{}.byiLJwVqMzg39fb251SaoN+19fo=")
      cookie.Opium.Cookie.value);
  Lwt.return ()
;;

(* Values set by the handler end up in the signed session cookie. *)
let cookie_set _ () =
  let middleware = Sihl.Web.Middleware.session () in
  let req = Opium.Request.get "" in
  let handler _ =
    let resp = Opium.Response.of_plain_text "" in
    Lwt.return @@ Sihl.Web.Session.set ("foo", Some "bar") resp
  in
  let* response = Rock.Middleware.apply middleware handler req in
  let cookie = Opium.Response.cookies response |> List.hd in
  let cookie_value = cookie.Opium.Cookie.value in
  Alcotest.(
    check
      (pair string string)
      "persists session values"
      ("_session", {|{"foo":"bar"}.jE75kXj9sbZp6tP7oJLhrp9c/+w=|})
      cookie_value);
  Lwt.return ()
;;

(* Full round trip over three requests: set a value, read it back while
   deleting it and setting another, then verify the final state. *)
let session_persisted_across_requests _ () =
  let middleware = Sihl.Web.Middleware.session () in
  let req = Opium.Request.get "" in
  let handler _ =
    let resp = Opium.Response.of_plain_text "" in
    Lwt.return @@ Sihl.Web.Session.set ("foo", Some "bar") resp
  in
  let* response = Rock.Middleware.apply middleware handler req in
  let cookies = Opium.Response.cookies response in
  Alcotest.(
    check int "responds with exactly one cookie" 1 (List.length cookies));
  let cookie = Opium.Response.cookie "_session" response |> Option.get in
  let cookie_value = cookie.Opium.Cookie.value in
  Alcotest.(
    check
      (pair string string)
      "persists session values"
      ("_session", {|{"foo":"bar"}.jE75kXj9sbZp6tP7oJLhrp9c/+w=|})
      cookie_value);
  (* Second request: send the cookie back, mutate the session. *)
  let req =
    Opium.Request.get "" |> Opium.Request.add_cookie cookie.Opium.Cookie.value
  in
  let handler req =
    let session_value = Sihl.Web.Session.find "foo" req in
    Alcotest.(
      check (option string) "has session value" (Some "bar") session_value);
    let resp =
      Opium.Response.of_plain_text ""
      |> Sihl.Web.Session.set ("foo", None)
      |> Sihl.Web.Session.set ("fooz", Some "other")
    in
    Lwt.return resp
  in
  let* response = Rock.Middleware.apply middleware handler req in
  let cookies = Opium.Response.cookies response in
  Alcotest.(
    check int "responds with exactly one cookie" 1 (List.length cookies));
  let cookie = Opium.Response.cookie "_session" response |> Option.get in
  let cookie_value = cookie.Opium.Cookie.value in
  Alcotest.(
    check
      (pair string string)
      "persists session values"
      ("_session", {|{"fooz":"other"}.VRJU0/vmwzPLrDU0zulQ7MojZUU=|})
      cookie_value);
  (* Third request: verify the deletion and the new value. *)
  let req =
    Opium.Request.get "" |> Opium.Request.add_cookie cookie.Opium.Cookie.value
  in
  let handler req =
    Alcotest.(
      check
        (option string)
        "has deleted session value"
        None
        (Sihl.Web.Session.find "foo" req));
    Alcotest.(
      check
        (option string)
        "has set session value"
        (Some "other")
        (Sihl.Web.Session.find "fooz" req));
    Lwt.return @@ Opium.Response.of_plain_text ""
  in
  let* _ = Rock.Middleware.apply middleware handler req in
  Lwt.return ()
;;

let suite =
  [ ( "session"
    , [ test_case "unsigned session cookie" `Quick unsigned_session_cookie
      ; test_case
          "invalid session cookie signature"
          `Quick
          invalid_session_cookie_signature
      ; test_case
          "invalid session cookie value"
          `Quick
          invalid_session_cookie_value
      ; test_case
          "no empty cookie set if already present"
          `Quick
          no_empty_cookie_set_if_already_present
      ; test_case "empty cookie set" `Quick empty_cookie_set
      ; test_case "cookie set" `Quick cookie_set
      ; test_case
          "session persisted across requests"
          `Quick
          session_persisted_across_requests
      ] )
  ]
;;

let () =
  Logs.set_level (Sihl.Log.get_log_level ());
  Logs.set_reporter (Sihl.Log.cli_reporter ());
  Lwt_main.run (Alcotest_lwt.run "session" suite)
;;
parser_cpp_mly_helper.ml
open Common
open Ast_cpp
module Ast = Ast_cpp
module Flag = Flag_parsing

let logger = Logging.get_logger [ __MODULE__ ]

(*****************************************************************************)
(* Wrappers *)
(*****************************************************************************)

(* TODO: switch to use logger *)
(* Emit a parsing warning (when verbose) and return [v] unchanged. *)
let warning s v =
  if !Flag.verbose_parsing then Common2.warning ("PARSING: " ^ s) v else v

(* Raise a parse error located at [tok]. *)
let error s tok = raise (Parse_info.Other_error (s, tok))
let fake s = Parse_info.fake_info s

(*****************************************************************************)
(* Parse helpers functions *)
(*****************************************************************************)

(*-------------------------------------------------------------------------- *)
(* Type related *)
(*-------------------------------------------------------------------------- *)

(* Accumulators for declaration specifiers as the grammar sees them one by
   one (storage class, signedness, short/long, base type, qualifiers,
   modifiers); combined later into a real type by
   [type_and_storage_from_decl]. *)
type storage_opt = NoSto | StoTypedef of tok | Sto of storage wrap
type shortLong = Short of tok | Long of tok | LongLong of tok * tok
type sign = Signed of tok | UnSigned of tok

type decl = {
  storageD : storage_opt;
  typeD : sign option * shortLong option * typeC option;
  qualifD : type_qualifiers;
  modifierD : modifier list;
}

let ii_of_sign = function
  | Signed ii
  | UnSigned ii ->
      ii

let ii_of_short_long = function
  | Short ii
  | Long ii
  | LongLong (ii, _) ->
      ii

(* Empty accumulator: no storage, no type pieces, no qualifiers/modifiers. *)
let nullDecl =
  {
    storageD = NoSto;
    typeD = (None, None, None);
    qualifD = Ast.nQ;
    modifierD = [];
  }

(* Record a storage-class specifier; duplicate => warning, conflicting =>
   error. *)
let addStorageD x decl =
  match decl with
  | { storageD = NoSto; _ } -> { decl with storageD = x }
  | { storageD = (StoTypedef ii | Sto (_, ii)) as y; _ } ->
      if x =*= y then decl |> warning "duplicate storage classes"
      else error "multiple storage classes" ii

let addModifierD x decl =
  (* old: check: warning "duplicate inline" *)
  { decl with modifierD = x :: decl.modifierD }

(* Record one piece of the type: sign (Left3), short/long (Middle3), or a
   base type (Right3); diagnoses duplicates and contradictions. *)
let addTypeD ty decl =
  match (ty, decl) with
  | Left3 (Signed _), { typeD = Some (Signed _), _b, _c; _ } ->
      decl |> warning "duplicate 'signed'"
  | Left3 (UnSigned _), { typeD = Some (UnSigned _), _b, _c; _ } ->
      decl |> warning "duplicate 'unsigned'"
  | Left3 sign, { typeD = Some _, _b, _c; _ } ->
      error "both signed and unsigned specified" (ii_of_sign sign)
  | Left3 x, { typeD = None, b, c; _ } -> { decl with typeD = (Some x, b, c) }
  | Middle3 (Short _), { typeD = _a, Some (Short _), _c; _ } ->
      decl |> warning "duplicate 'short'"
  (* gccext: long long allowed *)
  | Middle3 (Long t1), { typeD = a, Some (Long t2), c; _ } ->
      { decl with typeD = (a, Some (LongLong (t1, t2)), c) }
  | Middle3 (Long _), { typeD = _a, Some (LongLong _), _c; _ } ->
      decl |> warning "triplicate 'long'"
  | Middle3 sl, { typeD = _a, Some _, _c; _ } ->
      error "both long and short specified" (ii_of_short_long sl)
  | Middle3 x, { typeD = a, None, c; _ } -> { decl with typeD = (a, Some x, c) }
  | Right3 _t, { typeD = _a, _b, Some _; _ } ->
      (* old: was error before, but tedious to get an ii for error *)
      decl |> warning "two or more data types"
  | Right3 t, { typeD = a, b, None; _ } -> { decl with typeD = (a, b, Some t) }

let addQualif tq1 tq2 =
  (* old: check: warning "duplicate 'const'", warning "duplicate 'volatile'"*)
  tq1 :: tq2

let addQualifD qu qu2 = { qu2 with qualifD = addQualif qu qu2.qualifD }

(*-------------------------------------------------------------------------- *)
(* Declaration/Function related *)
(*-------------------------------------------------------------------------- *)

(* stdC: type section, basic integer types (and ritchie)
 * To understand the code, just look at the result (right part of the PM)
 * and go back.
 * old: before TSized and TPrimitive, when there was a complex TBase
 * there was more checks:
 *  error "signed, unsigned valid only for char and int" (List.hd iit)
 *  error "long or short specified with floatint type" (List.hd iit)
 *  error "the only valid combination is long double" (List.hd iit)
 *  error "long, short valid only for int or float" (List.hd iit)
 *
 * if do short uint i, then gcc say parse error, strange ? it is
 * not a parse error, it is just that we dont allow with typedef
 * either short/long or signed/unsigned. In fact, with
 * parse_typedef_fix2 (with et() and dt()) now I say too parse
 * error so this code is executed only when do short struct
 * {....} and never with a typedef cos now we parse short uint i
 * as short ident ident => parse error (cos after first short i
 * pass in dt() mode)
 *)

(* Combine the accumulated specifier pieces into (type, storage, modifiers). *)
let type_and_storage_from_decl
    { storageD = st; qualifD = qu; typeD = ty; modifierD = mods } =
  let ty =
    match ty with
    | None, None, None -> (
        (* c++ext: *)
        match st with
        | Sto (Auto, ii) -> TAuto ii
        | _ ->
            (* old: error "no type (could default to 'int')" (List.hd iit) *)
            TPrimitive (TInt, Parse_info.unsafe_fake_info "int"))
    | None, None, Some t -> t
    | sign_opt, short_long_opt, topt ->
        let sign =
          match sign_opt with
          | None -> []
          | Some (Signed t) -> [ (TSigned, t) ]
          | Some (UnSigned t) -> [ (TUnsigned, t) ]
        in
        let short_long =
          match short_long_opt with
          | None -> []
          | Some (Short t) -> [ (TShort, t) ]
          | Some (Long t) -> [ (TLong, t) ]
          | Some (LongLong (t1, t2)) -> [ (TLong, t1); (TLong, t2) ]
        in
        let typ =
          match topt with
          | None -> None
          | Some typc -> Some (nQ, typc)
        in
        TSized (sign @ short_long, typ)
  in
  ((qu, ty), st, mods)

(* Typedef names must be simple unqualified identifiers. *)
let id_of_dname_for_typedef dname =
  match dname with
  | DN (None, [], IdIdent id) -> id
  | _ -> error "expecting an ident for typedef" (ii_of_dname dname)

(* Build a [onedecl] from the declarator name/init, modifiers and storage. *)
let make_onedecl ~v_namei ~mods ~sto v_type : onedecl =
  let specs = mods |> List.map (fun m -> M m) in
  match v_namei with
  (* less: could check sto, because typedef can't be anonymous since c++17
   * lesS: use mods? *)
  | None -> EmptyDecl v_type
  | Some (dn, iniopt) -> (
      match sto with
      | StoTypedef t ->
          (* less: use mods? *)
          let id = id_of_dname_for_typedef dn in
          TypedefDecl (t, v_type, id)
      | NoSto
      | Sto _ -> (
          let more_specs =
            match sto with
            | NoSto -> []
            | Sto sto -> [ ST sto ]
            | _ -> raise Impossible
          in
          match (dn, iniopt) with
          | DN n, _ ->
              V
                ( { name = n; specs = specs @ more_specs },
                  { v_init = iniopt; v_type } )
          | DNStructuredBinding (l, (id, ids), r), Some ini ->
              StructuredBinding (v_type, (l, id :: ids, r), ini)
          | DNStructuredBinding _ids, None ->
              error "expecting an init for structured_binding" (ii_of_dname dn))
      )

(* Like [type_and_storage_from_decl] but for parameters, where only
   'register' is a legal storage class. *)
let type_and_specs_from_decl decl =
  let { storageD = st; _ } = decl in
  let t, _storage, _inline = type_and_storage_from_decl decl in
  match st with
  | NoSto -> (t, [])
  | Sto (Register, ii) -> (t, [ ST (Register, ii) ])
  | StoTypedef ii
  | Sto (_, ii) ->
      error "storage class specified for parameter of function" ii

(* Parameter names must be plain identifiers (no qualified names). *)
let fixNameForParam (name, ftyp) =
  match name with
  | None, [], IdIdent id -> (id, ftyp)
  | _ ->
      let ii = Lib_parsing_cpp.ii_of_any (Name name) |> List.hd in
      error "parameter have qualifier" ii

(* For function definitions: 'typedef' storage is rejected. *)
let type_and_storage_for_funcdef_from_decl decl =
  let returnType, storage, _inline = type_and_storage_from_decl decl in
  match storage with
  | StoTypedef tok -> error "function definition declared 'typedef'" tok
  | _x -> (returnType, storage)

(*
 * this function is used for func definitions (not declarations).
 * In that case we must have a name for the parameter.
 * This function ensures that we give only parameterTypeDecl with well
 * formed Classic constructor.
 *
 * todo?: do we accept other declaration in ?
 * so I must add them to the compound of the deffunc. I dont
 * have to handle typedef pb here cos C forbid to do VF f { ... }
 * with VF a typedef of func cos here we dont see the name of the
 * argument (in the typedef)
 *)
let (fixOldCDecl : type_ -> type_) =
 fun ty ->
  match snd ty with
  | TFunction { ft_params = params; _ } -> (
      (* stdC: If the prototype declaration declares a parameter for a
       * function that you are defining (it is part of a function
       * definition), then you must write a name within the declarator.
       * Otherwise, you can omit the name. *)
      match Ast.unparen params with
      | [ P { p_name = None; p_type = ty2; _ } ] -> (
          match Ast.unwrap_typeC ty2 with
          | TPrimitive (TVoid, _) -> ty
          | _ ->
              (* less: there is some valid case actually, when use interfaces
               * and generic callbacks where specific instances do not
               * need the extra parameter (happens a lot in plan9).
               *  Maybe this check is better done in a scheck for C.
               *  let info = Lib_parsing_cpp.ii_of_any (Type ty2) +> List.hd in
               *  pr2 (spf "SEMANTIC: parameter name omitted (but I continue) at %s"
               *       (Parse_info.string_of_info info) );
               *)
              ty)
      | params ->
          params
          |> List.iter (fun param ->
                 match param with
                 | P { p_name = None; p_type = _ty2; _ } ->
                     (* see above
                      * let info = Lib_parsing_cpp.ii_of_any (Type ty2) +> List.hd in
                      * (* if majuscule, then certainly macro-parameter *)
                      * pr2 (spf "SEMANTIC: parameter name omitted (but I continue) at %s"
                      *      (Parse_info.string_of_info info) );
                      *)
                     ()
                 | _ -> ());
          ty)
  (* todo? can we declare prototype in the decl or structdef,
   * ... => length <> but good kan meme *)
  | _ ->
      (* gcc says parse error but I dont see why *)
      let ii = Lib_parsing_cpp.ii_of_any (Type ty) |> List.hd in
      error "seems this is not a function" ii

(* TODO: this is ugly ... use record! *)
(* Assemble a [func_definition] from a declarator triple and a body,
   double-checking that all parameters of a definition are named. *)
let fixFunc ((name, ty, _stoTODO), cp) : func_definition =
  let ent = { name; specs = [] } in
  let ftyp =
    match ty with
    | aQ, TFunction ({ ft_params = params; _ } as ftyp) ->
        (* it must be nullQualif, cos parser construct only this *)
        assert (aQ =*= nQ);
        (match Ast.unparen params with
        | [ P { p_name = None; p_type = ty2; _ } ] -> (
            match Ast.unwrap_typeC ty2 with
            | TPrimitive (TVoid, _) -> ()
            (* failwith "internal errror: fixOldCDecl not good" *)
            | _ -> ())
        | params ->
            params
            |> List.iter (function
                 | P { p_name = Some _s; _ } -> ()
                 (* failwith "internal errror: fixOldCDecl not good" *)
                 | _ -> ()));
        ftyp
    | _ ->
        logger#error "weird, not a functionType. Got %s"
          (Ast_cpp.show_type_ ty);
        (* this is possible if someone used a typedef to a function type, or
         * when tree-sitter-cpp did some error recovery and wrongly parsed
         * something as a function when it's really not *)
        {
          ft_params = Parse_info.unsafe_fake_bracket [];
          ft_ret = ty;
          ft_specs = [];
          ft_const = None;
          ft_throw = [];
        }
  in
  ( ent,
    {
      f_type = ftyp;
      (* TODO move in f_specs f_storage = sto; *)
      f_body = cp;
      f_specs = [];
    } )

(* In a class body, a declaration of function type becomes a method member
   (possibly pure-virtual '= 0'); anything else stays a field decl list. *)
let fixFieldOrMethodDecl (xs, semicolon) : class_member =
  match xs with
  | [ V (ent, { v_init; v_type = _q, TFunction ft }) ] ->
      (* todo? define another type instead of onedecl? *)
      let fbody =
        match v_init with
        | None -> FBDecl semicolon
        | Some (EqInit (tokeq, InitExpr (C (Int (Some 0, iizero))))) ->
            FBZero (tokeq, iizero, semicolon)
        | _ -> error "can't assign expression to method decl" semicolon
      in
      let def = { f_type = ft; f_body = fbody; f_specs = [] } in
      F (Func (ent, def))
  | _ -> F (DeclList (xs, semicolon))

(*-------------------------------------------------------------------------- *)
(* shortcuts *)
(*-------------------------------------------------------------------------- *)

(* used only in the .dyp now *)
let mk_e e = e
let mk_funcall e1 args = Call (e1, args)

(* Build a constructor definition; constructors have no return type, so a
   fake void (carrying the id's position) is used. *)
let mk_constructor specs id (lp, params, rp) _cmem_initializer_opt_TODO cp =
  let params = Common.optlist_to_list params in
  let ftyp =
    {
      ft_ret = (nQ, TPrimitive (TVoid, snd id));
      ft_params = (lp, params, rp);
      ft_specs = [];
      (* TODO *)
      ft_const = None;
      ft_throw = [];
    }
  in
  let name = name_of_id id in
  let ent = { name; specs } in
  (ent, { f_type = ftyp; f_body = cp; f_specs = [] })

(* Build a destructor definition (no parameters, fake void return). *)
let mk_destructor specs tilde id (lp, _voidopt, rp) exnopt cp =
  let ftyp =
    {
      ft_ret = (nQ, TPrimitive (TVoid, snd id));
      ft_params = (lp, [], rp);
      ft_specs = [];
      ft_const = None;
      ft_throw = Option.to_list exnopt;
    }
  in
  let name = (None, noQscope, IdDestructor (tilde, id)) in
  let ent = { name; specs } in
  (ent, { f_type = ftyp; f_body = cp; f_specs = [] })
fl_metascanner.ml
(* $Id$ -*- tuareg -*-
 * ----------------------------------------------------------------------
 *)

(* Scanner/parser for META files: turns the token stream produced by
 * [Fl_meta.token] into a [pkg_expr] tree and provides lookup of
 * variable definitions under a set of predicates. *)

open Fl_metatoken
open Printf

(* A formal predicate as written in a META file: [p] or [-p]. *)
type formal_pred = [ `Pred of string | `NegPred of string ]

(* ["var = v"] is a base definition, ["var += v"] an appendix. *)
type flavour = [ `BaseDef | `Appendix ]

(* One "var(preds) = value" clause. *)
type pkg_definition =
  { def_var : string;
    def_flav : flavour;
    def_preds : formal_pred list;
    def_value : string
  }

(* A package: its definitions plus named subpackages. *)
type pkg_expr =
  { pkg_defs : pkg_definition list;
    pkg_children : (string * pkg_expr) list
  }

exception Error of string

(* Render a predicate list back to META syntax, e.g. "(byte,-toploop)".
   Returns "" for the empty list. *)
let string_of_preds pl =
  let print = function
    | `Pred n -> n
    | `NegPred n -> "-" ^ n
  in
  if pl = [] then "" else "(" ^ ((String.concat "," (List.map print pl)) ^ ")")

(* Turn a lexbuf into a token stream; [Space] tokens are dropped and
   [Newline] only updates the line/column bookkeeping. After the first
   [Eof] the stream keeps returning [Eof]. Each call yields
   (line, column, token). *)
let scan_lexing buf =
  let (line_ref, pos0_ref, eof_found) = ((ref 1), (ref 0), (ref false)) in
  fun () ->
    let rec next line pos0 =
      let t = Fl_meta.token buf in
      match t with
      | Space -> next line pos0
      | Newline -> next (line + 1) (Lexing.lexeme_end buf)
      | Eof -> (eof_found := true; produce line pos0 Eof)
      | _ -> produce line pos0 t
    and produce line pos0 t =
      (line_ref := line;
       pos0_ref := pos0;
       (* column = offset of the lexeme relative to the start of the line *)
       let pos = (Lexing.lexeme_start buf) - pos0 in
       (line, pos, t))
    in
    if !eof_found then produce !line_ref !pos0_ref Eof
    else next !line_ref !pos0_ref

let scan ch = scan_lexing (Lexing.from_channel ch)

(* Parse a complete META file from [lexbuf]. Raises [Error] with a
   human-readable message on syntax errors, double definitions, and
   malformed subpackage names. *)
let parse_lexing lexbuf =
  (* Remove duplicates, keeping the LAST occurrence of each element. *)
  let rec mk_set l =
    match l with
    | x :: l' -> if List.mem x l' then mk_set l' else x :: (mk_set l')
    | [] -> []
  in
  let error_msg msg line col =
    Printf.sprintf "%s at line %d position %d" msg line col
  in
  let next_token = scan_lexing lexbuf in
  let raise_err error_fun line col = raise (Error (error_fun line col)) in
  (* Read one token and apply [test]; fail with [error_fun] if it does
     not match. *)
  let get_tok test error_fun =
    let (line, col, tok) = next_token () in
    match test tok with
    | None -> raise_err error_fun line col
    | Some result -> result
  in
  (* Run a sub-rule, re-attributing any [Error] to (line, col). *)
  let get_rule rule arg error_fmt line col =
    try rule arg with
    | Error _ -> raise_err error_fmt line col
  in
  (* [parse_all need_rparen] parses a sequence of clauses. At top level
     [need_rparen] is false and the sequence ends at Eof; inside a
     subpackage it is true and the sequence ends at ')'. *)
  let rec parse_all need_rparen =
    match next_token () with
    | (line, col, Name "package") ->
        let n =
          get_tok string_tok
            (error_msg "String literal expected after 'package'") in
        let () =
          get_tok (const_tok LParen)
            (error_msg "'(' expected after string") in
        let subpkg =
          get_rule parse_all true
            (error_msg "Error in subpackage definition") line col in
        let rest = parse_all need_rparen in
        { pkg_defs = rest.pkg_defs;
          pkg_children = (n, subpkg) :: rest.pkg_children;
        }
    | (line, col, Name n) ->
        let (args, flav, value) =
          get_rule parse_properties ()
            (error_msg "Error in 'name = value' clause") line col in
        let rest = parse_all need_rparen in
        (* TODO: Check args *)
        let args' = List.sort compare (mk_set args) in
        let def =
          { def_var = n; def_flav = flav; def_preds = args'; def_value = value } in
        { pkg_defs = def :: rest.pkg_defs;
          pkg_children = rest.pkg_children;
        }
    | (line, col, Eof) ->
        (if need_rparen then
           raise_err
             (Printf.sprintf "Unexpected end of file in line %d position %d")
             line col
         else ();
         { pkg_defs = []; pkg_children = []; })
    | (line, col, RParen) ->
        (* FIX: this branch reported "Unexpected end of file" although the
           offending token is a closing parenthesis. *)
        (if not need_rparen then
           raise_err
             (Printf.sprintf "Unexpected ')' in line %d position %d")
             line col
         else ();
         { pkg_defs = []; pkg_children = []; })
    | (line, col, _) ->
        raise_err (error_msg "Expected 'name = value' clause") line col
  (* Parse what follows a variable name: either "(p1,p2,...) = str",
     "(p1,p2,...) += str", "= str" or "+= str". *)
  and parse_properties () =
    match next_token () with
    | (line, col, LParen) ->
        let arg1 = parse_argument () in
        let args = parse_arguments () in
        let flav = parse_flavour () in
        let s =
          get_tok string_tok
            (error_msg "Expected string constant after '='") in
        ((arg1 :: args), flav, s)
    | (line, col, Equal) ->
        let s =
          get_tok string_tok
            (error_msg "'=' must be followed by a string constant") in
        ([], `BaseDef, s)
    | (line, col, PlusEqual) ->
        let s =
          get_tok string_tok
            (error_msg "'+=' must be followed by a string constant") in
        ([], `Appendix, s)
    | (line, col, _) ->
        raise_err
          (error_msg "Expected a '=' or a '(arguments,...)=' clause") line col
  (* Remaining ",pred" items of a predicate list, up to the ')'. *)
  and parse_arguments () =
    match next_token () with
    | (line, col, Comma) ->
        let arg = parse_argument () in
        let args = parse_arguments () in
        arg :: args
    | (_, _, RParen) -> []
    | (line, col, _) ->
        raise_err (error_msg "Another predicate or a ')' expected") line col
  (* A single predicate: [name] or [-name]. *)
  and parse_argument () =
    match next_token () with
    | (line, col, Name n) -> `Pred n
    | (line, col, Minus) ->
        let n = get_tok name_tok (error_msg "Name expected after '-'") in
        `NegPred n
    | (line, col, _) ->
        raise_err (error_msg "Name or -Name expected") line col
  (* The assignment operator after a predicate list. *)
  and parse_flavour () =
    match next_token () with
    | (line, col, Equal) -> `BaseDef
    | (line, col, PlusEqual) -> `Appendix
    | (line, col, _) ->
        (* FIX: the accepted tokens are '=' and '+=', but the message
           claimed "'+' or '+='". *)
        raise_err (error_msg "'=' or '+=' expected") line col
  in
  (* Reject two base definitions of the same variable under the same
     predicate set. [p] is the dotted path of the enclosing package,
     used only in the error message. *)
  let rec check_defs p l =
    match l with
    | [] -> ()
    | def :: l' ->
        (List.iter
           (fun def' ->
              if (def.def_var = def'.def_var)
                 && ((def.def_preds = def'.def_preds)
                     && ((def.def_flav = `BaseDef)
                         && (def'.def_flav = `BaseDef)))
              then
                (let prefix =
                   if p = "" then "" else "In subpackage " ^ (p ^ ": ") in
                 let args = string_of_preds def.def_preds in
                 raise
                   (Error
                      (prefix
                       ^ ("Double definition of '"
                          ^ (def.def_var ^ (args ^ "'"))))))
              else ())
           l';
         check_defs p l')
  in
  (* Recursively validate a package: no double variable definitions, no
     duplicate subpackage names, no '.' inside a subpackage name. *)
  let rec check_pkg p pkg =
    (check_defs p pkg.pkg_defs;
     let l = ref [] in
     List.iter
       (fun (n, subpkg) ->
          let p' = if p = "" then n else p ^ ("." ^ n) in
          (if List.mem n !l then
             raise (Error ("Double definition for subpackage " ^ p'))
           else ();
           if String.contains n '.' then
             raise
               (Error
                  ("Subpackage name must not contain '.': \"" ^ (n ^ "\"")))
           else ();
           check_pkg p' subpkg;
           l := n :: !l))
       pkg.pkg_children)
  in
  try
    let pkg = parse_all false in
    (check_pkg "" pkg; pkg)
  with
  | Error "" -> raise (Error "Syntax Error")

let parse ch = parse_lexing (Lexing.from_channel ch)

(* Quote backslashes and double quotes for META output (no Str needed). *)
let escape s =
  let b = Buffer.create (String.length s) in
  (for k = 0 to (String.length s) - 1 do
     (match s.[k] with
      | '\\' -> Buffer.add_string b "\\\\"
      | '"' -> Buffer.add_string b "\\\""
      | c -> Buffer.add_char b c)
   done;
   Buffer.contents b)

(* Print one definition in META syntax: var(preds) = "value". *)
let print_def f def =
  let format_pred = function
    | `Pred s -> s
    | `NegPred s -> "-" ^ s
  in
  fprintf f "%s%s %s \"%s\"\n"
    def.def_var
    (match def.def_preds with
     | [] -> ""
     | l -> "(" ^ ((String.concat "," (List.map format_pred l)) ^ ")"))
    (match def.def_flav with
     | `BaseDef -> "="
     | `Appendix -> "+=")
    (escape def.def_value)

(* Print a whole package tree in META syntax. *)
let rec print f pkg =
  (List.iter (print_def f) pkg.pkg_defs;
   List.iter
     (fun (name, child) ->
        (fprintf f "\npackage \"%s\" (\n" (escape name);
         print f child;
         fprintf f ")\n"))
     pkg.pkg_children)

(* FIXME: O(n^2) *)
let rec remove_dups l =
  match l with
  | x :: l' -> if List.mem x l' then remove_dups l' else x :: remove_dups l'
  | [] -> []

(* Look up [name] in [def] under [predicate_list]. Returns the base
   definition with the most matching predicates, with all matching
   "+=" appendices concatenated, plus the formal predicates involved.
   Raises [Not_found] if no base definition matches. *)
let lookup_2 name predicate_list def =
  (* NOTE: [fulfills] ignores its first argument and always tests
     against [predicate_list] from the closure; both call sites pass
     that same list anyway. *)
  let fulfills actual_preds formal_preds =
    List.for_all
      (function
        | `Pred n -> List.mem n predicate_list
        | `NegPred n -> not (List.mem n predicate_list))
      formal_preds
  in
  (* Best matching `BaseDef: the one with the largest predicate count. *)
  let rec search_base best_n best_value l =
    match l with
    | [] -> if best_n >= 0 then best_value else raise Not_found
    | def :: l' ->
        if (name = def.def_var)
           && ((def.def_flav = `BaseDef)
               && ((fulfills predicate_list def.def_preds)
                   && ((List.length def.def_preds) > best_n)))
        then
          search_base (List.length def.def_preds)
            (def.def_value, def.def_preds) l'
        else search_base best_n best_value l'
  in
  (* All matching `Appendix definitions, in file order. *)
  let rec search_appdx l =
    match l with
    | [] -> []
    | def :: l' ->
        if (name = def.def_var)
           && ((def.def_flav = `Appendix)
               && (fulfills predicate_list def.def_preds))
        then (def.def_value, def.def_preds) :: (search_appdx l')
        else search_appdx l'
  in
  let value_a, preds_a = search_base (-1) ("", []) def in
  let additions = search_appdx def in
  let values_b = List.map fst additions in
  let preds_b = List.flatten (List.map snd additions) in
  let value = String.concat " " (value_a :: values_b) in
  let preds = remove_dups (preds_a @ preds_b) in
  (value, preds)

let lookup name predicate_list def = fst (lookup_2 name predicate_list def)

(* Does predicate [p] occur (positively or negatively) in any definition? *)
let predicate_exists p defs =
  List.exists
    (fun def ->
       List.exists
         (function
           | `Pred n -> n = p
           | `NegPred n -> n = p)
         def.def_preds)
    defs
(* $Id$ -*- tuareg -*- * ---------------------------------------------------------------------- * *)
exn.ml
(* Exception utilities for Base: sexp conversion of exceptions,
   re-raising with preserved backtraces, and uncaught-exception
   handling. *)

open! Import

type t = exn [@@deriving_inline sexp_of]

let sexp_of_t = (sexp_of_exn : t -> Sexplib0.Sexp.t)

[@@@end]

let exit = Caml.exit

(* Raised by [protectx] when both the body and the [finally] clause
   raise: carries (body exception, finally exception). *)
exception Finally of t * t [@@deriving_inline sexp]

(* ppx_sexp_conv-generated converter for [Finally]; do not edit by hand. *)
let () =
  Sexplib0.Sexp_conv.Exn_converter.add
    [%extension_constructor Finally]
    (function
      | Finally (arg0__001_, arg1__002_) ->
        let res0__003_ = sexp_of_t arg0__001_
        and res1__004_ = sexp_of_t arg1__002_ in
        Sexplib0.Sexp.List
          [ Sexplib0.Sexp.Atom "exn.ml.Finally"; res0__003_; res1__004_ ]
      | _ -> assert false)
;;

[@@@end]

(* Wraps an exception together with a caller-supplied context message;
   see [reraise] below. *)
exception Reraised of string * t [@@deriving_inline sexp]

(* ppx_sexp_conv-generated converter for [Reraised]; do not edit by hand. *)
let () =
  Sexplib0.Sexp_conv.Exn_converter.add
    [%extension_constructor Reraised]
    (function
      | Reraised (arg0__005_, arg1__006_) ->
        let res0__007_ = sexp_of_string arg0__005_
        and res1__008_ = sexp_of_t arg1__006_ in
        Sexplib0.Sexp.List
          [ Sexplib0.Sexp.Atom "exn.ml.Reraised"; res0__007_; res1__008_ ]
      | _ -> assert false)
;;

[@@@end]

exception Sexp of Sexp.t

(* We install a custom exn-converter rather than use:

   {[
     exception Sexp of Sexp.t [@@deriving_inline sexp]
     (* ... *)
     [@@@end]
   ]}

   to eliminate the extra wrapping of [(Sexp ...)]. *)
let () =
  Sexplib0.Sexp_conv.Exn_converter.add
    [%extension_constructor Sexp]
    (function
      | Sexp t -> t
      | _ ->
        (* Reaching this branch indicates a bug in sexplib. *)
        assert false)
;;

let create_s sexp = Sexp sexp

(* Re-raise [t] while keeping [backtrace] as the reported backtrace. *)
let raise_with_original_backtrace t backtrace =
  Caml.Printexc.raise_with_backtrace t backtrace
;;

(* True iff [t] is physically the most recently raised exception; used
   to decide whether the current raw backtrace belongs to it. *)
external is_phys_equal_most_recent : t -> bool = "Base_caml_exn_is_most_recent_exn"

(* Wrap [exn] in [Reraised str], preserving the original backtrace when
   [exn] is the exception currently in flight. *)
let reraise exn str =
  let exn' = Reraised (str, exn) in
  if is_phys_equal_most_recent exn
  then (
    let bt = Caml.Printexc.get_raw_backtrace () in
    raise_with_original_backtrace exn' bt)
  else raise exn'
;;

(* printf-style variant of [reraise]; note the extra [()] stage. *)
let reraisef exc format = Printf.ksprintf (fun str () -> reraise exc str) format
let to_string exc = Sexp.to_string_hum ~indent:2 (sexp_of_exn exc)
let to_string_mach exc = Sexp.to_string_mach (sexp_of_exn exc)
let sexp_of_t = sexp_of_exn

(* Run [f x], then [finally x] whether or not [f] raised. If both
   raise, the two exceptions are packaged in [Finally]. *)
let protectx ~f x ~(finally : _ -> unit) =
  match f x with
  | res ->
    finally x;
    res
  | exception exn ->
    let bt = Caml.Printexc.get_raw_backtrace () in
    (match finally x with
     | () -> raise_with_original_backtrace exn bt
     | exception final_exn ->
       (* Unfortunately, the backtrace of the [final_exn] is discarded here. *)
       raise_with_original_backtrace (Finally (exn, final_exn)) bt)
;;

let protect ~f ~finally = protectx ~f () ~finally

(* True iff [f ()] raises any exception. *)
let does_raise (type a) (f : unit -> a) =
  try
    ignore (f () : a);
    false
  with
  | _ -> true
;;

(* Registers a pretty-printer: sexp form when a converter exists,
   otherwise the stdlib's textual rendering. *)
include Pretty_printer.Register_pp (struct
    type t = exn

    let pp ppf t =
      match sexp_of_exn_opt t with
      | Some sexp -> Sexp.pp_hum ppf sexp
      | None -> Caml.Format.pp_print_string ppf (Caml.Printexc.to_string t)
    ;;

    let module_name = "Base.Exn"
  end)

(* Print [exc] (and its backtrace, if recording is on) to stderr. *)
let print_with_backtrace exc raw_backtrace =
  Caml.Format.eprintf "@[<2>Uncaught exception:@\n@\n@[%a@]@]@\n@." pp exc;
  if Caml.Printexc.backtrace_status ()
  then Caml.Printexc.print_raw_backtrace Caml.stderr raw_backtrace;
  Caml.flush Caml.stderr
;;

let set_uncaught_exception_handler () =
  Caml.Printexc.set_uncaught_exception_handler print_with_backtrace
;;

(* Shared implementation of [handle_uncaught*]: run [f], and on any
   exception optionally flush at_exit handlers, print the exception,
   then call [exit 1]. *)
let handle_uncaught_aux ~do_at_exit ~exit f =
  try f () with
  | exc ->
    let raw_backtrace = Caml.Printexc.get_raw_backtrace () in
    (* One reason to run [do_at_exit] handlers before printing out the error
       message is that it helps curses applications bring the terminal in a
       good state, otherwise the error message might get corrupted.  Also,
       the OCaml top-level uncaught exception handler does the same. *)
    if do_at_exit
    then (
      try Caml.do_at_exit () with
      | _ -> ());
    (* Printing itself may fail; fall back to a plain message, and
       swallow even that failure so we still exit. *)
    (try print_with_backtrace exc raw_backtrace with
     | _ ->
       (try
          Caml.Printf.eprintf
            "Exn.handle_uncaught could not print; exiting anyway\n%!"
        with
        | _ -> ()));
    exit 1
;;

let handle_uncaught_and_exit f = handle_uncaught_aux f ~exit ~do_at_exit:true

let handle_uncaught ~exit:must_exit f =
  handle_uncaught_aux
    f
    ~exit:(if must_exit then exit else ignore)
    ~do_at_exit:must_exit
;;

(* Like [reraise] but always keeps the raw backtrace captured at the
   catch site. *)
let reraise_uncaught str func =
  try func () with
  | exn ->
    let bt = Caml.Printexc.get_raw_backtrace () in
    raise_with_original_backtrace (Reraised (str, exn)) bt
;;

external clear_backtrace : unit -> unit = "Base_clear_caml_backtrace_pos"
[@@noalloc]

let raise_without_backtrace e =
  (* We clear the backtrace to reduce confusion, so that people don't think
     whatever is stored corresponds to this raise. *)
  clear_backtrace ();
  Caml.raise_notrace e
;;

let initialize_module () = set_uncaught_exception_handler ()

module Private = struct
  let clear_backtrace = clear_backtrace
end
dune
; Build the [main] executable; links the Opium web framework and its
; GraphQL integration.
(executable
 (name main)
 (libraries opium opium-graphql))
dune
; Server-side half of ojs_list, published as ojs_list.server.
; NOTE(review): a large set of warnings is disabled wholesale via
; -w -6-7-...; presumably legacy code — consider re-enabling gradually.
(library
 (name ojs_list_server)
 (public_name ojs_list.server)
 (wrapped true)
 (flags
  -g
  :standard
  -bin-annot
  -w
  -6-7-9-10-27-32-33-34-35-36-39-50-52
  -no-strict-sequence)
 (libraries ojs_list ojs_base.server yojson lwt)
 ; (preprocess (pps ))
 )
resolve.mli
val find_dot_git : unit -> string option
(** Search for a [.git] directory from the current directory upwards.
    Return [None] if none could be found. *)

val parse_dot_git : unit -> (Types.config, string) result
(** Search for a [.git] directory from the current directory upwards and
    parse its configuration. Return [Error] with an error message if no
    [.git] directory could be found, or in case of a parse error. *)

val gitlab_project_name : ?remote:string -> Types.config -> string option
(** [gitlab_project_name ~remote config] extracts the Gitlab project name
    from the URL of the remote named [remote] in [config]. If [remote] is
    not specified, and there is more than one remote, returns [None].
    Also returns [None] if no suitable remote URL can be found. *)
ocollection.ml
(*****************************************************************************)
(* Collection *)
(*****************************************************************************)

(*---------------------------------------------------------------------------*)
(* One-step destructuring view of a collection: either empty, or a head
   element plus the remaining collection. *)
type ('a, 'b) view = Empty | Cons of 'a * 'b

(* Abstract base class for persistent collections: subclasses provide
   [empty]/[add]/[iter]/[view] (plus [del]/[mem]/[null] for efficiency);
   everything else is derived from those. *)
class virtual ['a] ocollection =
  object (o : 'o)
    inherit Objet.objet

    method virtual empty : 'o
    method virtual add : 'a -> 'o
    method virtual iter : ('a -> unit) -> unit
    method virtual view : ('a, 'o) view

    (* no need virtual, but better to redefine for efficiency *)
    method virtual del : 'a -> 'o (* can do default with: view+iter *)
    method virtual mem : 'a -> bool (* can do default with: mem(tolist) *)
    method virtual null : bool (* can do default with: length(tolist) = 0 *)

    (* Imperative-style wrappers that discard the resulting collection;
       only meaningful for mutable subclasses. *)
    method add2 : 'a -> unit =
      fun a ->
        o#add a |> ignore;
        ()

    method del2 : 'a -> unit =
      fun a ->
        o#del a |> ignore;
        ()

    method clear : unit = o#iter (fun e -> o#del2 e)

    method fold : 'b. ('b -> 'a -> 'b) -> 'b -> 'b =
      fun f a ->
        let a = ref a in
        o#iter (fun e -> a := f !a e);
        !a

    (* Elements in iteration order (accumulated reversed, then re-reversed). *)
    method tolist : 'a list = List.rev (o#fold (fun acc e -> e :: acc) [])

    method fromlist : 'a list -> 'o =
      fun xs -> xs |> List.fold_left (fun o e -> o#add e) o#empty

    method length : int =
      (* oldsimple: o#tolist +> List.length *)
      (* opti: count during iteration, avoiding an intermediate list *)
      let count = ref 0 in
      o#iter (fun _e -> incr count);
      !count

    method exists : ('a -> bool) -> bool = fun f -> o#tolist |> List.exists f

    method filter : ('a -> bool) -> 'o =
      fun f ->
        (* iter and call add from empty, or del *)
        o#tolist |> List.filter f |> o#fromlist

    (* forall, fold, map *)

    (* Head of the collection; fails on an empty collection. *)
    method getone : 'a =
      match o#view with
      | Cons (e, _tl) -> e
      | Empty -> failwith "no head"

    (* Everything but the head; fails on an empty collection. *)
    method others : 'o =
      match o#view with
      | Cons (_e, tl) -> tl
      | Empty -> failwith "no tail"
  end
(*****************************************************************************) (* Collection *) (*****************************************************************************)
gtest-internal-inl.h
// Copyright 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Utility functions and classes used by the Google C++ testing framework.// // This file contains purely Google Test's internal implementation. Please // DO NOT #INCLUDE IT IN A USER PROGRAM. #ifndef GOOGLETEST_SRC_GTEST_INTERNAL_INL_H_ #define GOOGLETEST_SRC_GTEST_INTERNAL_INL_H_ #ifndef _WIN32_WCE # include <errno.h> #endif // !_WIN32_WCE #include <stddef.h> #include <stdlib.h> // For strtoll/_strtoul64/malloc/free. 
#include <string.h> // For memmove. #include <algorithm> #include <cstdint> #include <memory> #include <string> #include <vector> #include "gtest/internal/gtest-port.h" #if GTEST_CAN_STREAM_RESULTS_ # include <arpa/inet.h> // NOLINT # include <netdb.h> // NOLINT #endif #if GTEST_OS_WINDOWS # include <windows.h> // NOLINT #endif // GTEST_OS_WINDOWS #include "gtest/gtest.h" #include "gtest/gtest-spi.h" GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ /* class A needs to have dll-interface to be used by clients of class B */) namespace testing { // Declares the flags. // // We don't want the users to modify this flag in the code, but want // Google Test's own unit tests to be able to access it. Therefore we // declare it here as opposed to in gtest.h. GTEST_DECLARE_bool_(death_test_use_fork); namespace internal { // The value of GetTestTypeId() as seen from within the Google Test // library. This is solely for testing GetTestTypeId(). GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest; // Names of the flags (needed for parsing Google Test flags). const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests"; const char kBreakOnFailureFlag[] = "break_on_failure"; const char kCatchExceptionsFlag[] = "catch_exceptions"; const char kColorFlag[] = "color"; const char kFailFast[] = "fail_fast"; const char kFilterFlag[] = "filter"; const char kListTestsFlag[] = "list_tests"; const char kOutputFlag[] = "output"; const char kBriefFlag[] = "brief"; const char kPrintTimeFlag[] = "print_time"; const char kPrintUTF8Flag[] = "print_utf8"; const char kRandomSeedFlag[] = "random_seed"; const char kRepeatFlag[] = "repeat"; const char kShuffleFlag[] = "shuffle"; const char kStackTraceDepthFlag[] = "stack_trace_depth"; const char kStreamResultToFlag[] = "stream_result_to"; const char kThrowOnFailureFlag[] = "throw_on_failure"; const char kFlagfileFlag[] = "flagfile"; // A valid random seed must be in [1, kMaxRandomSeed]. 
const int kMaxRandomSeed = 99999; // g_help_flag is true if and only if the --help flag or an equivalent form // is specified on the command line. GTEST_API_ extern bool g_help_flag; // Returns the current time in milliseconds. GTEST_API_ TimeInMillis GetTimeInMillis(); // Returns true if and only if Google Test should use colors in the output. GTEST_API_ bool ShouldUseColor(bool stdout_is_tty); // Formats the given time in milliseconds as seconds. GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms); // Converts the given time in milliseconds to a date string in the ISO 8601 // format, without the timezone information. N.B.: due to the use the // non-reentrant localtime() function, this function is not thread safe. Do // not use it in any code that can be called from multiple threads. GTEST_API_ std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms); // Parses a string for an Int32 flag, in the form of "--flag=value". // // On success, stores the value of the flag in *value, and returns // true. On failure, returns false without changing *value. GTEST_API_ bool ParseInt32Flag( const char* str, const char* flag, int32_t* value); // Returns a random seed in range [1, kMaxRandomSeed] based on the // given --gtest_random_seed flag value. inline int GetRandomSeedFromFlag(int32_t random_seed_flag) { const unsigned int raw_seed = (random_seed_flag == 0) ? static_cast<unsigned int>(GetTimeInMillis()) : static_cast<unsigned int>(random_seed_flag); // Normalizes the actual seed to range [1, kMaxRandomSeed] such that // it's easy to type. const int normalized_seed = static_cast<int>((raw_seed - 1U) % static_cast<unsigned int>(kMaxRandomSeed)) + 1; return normalized_seed; } // Returns the first valid random seed after 'seed'. The behavior is // undefined if 'seed' is invalid. The seed after kMaxRandomSeed is // considered to be 1. 
// Returns the first valid random seed after 'seed', wrapping from
// kMaxRandomSeed back to 1. Aborts (via GTEST_CHECK_) on an invalid seed.
inline int GetNextRandomSeed(int seed) {
  GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed)
      << "Invalid random seed " << seed << " - must be in [1, "
      << kMaxRandomSeed << "].";
  const int next_seed = seed + 1;
  return (next_seed > kMaxRandomSeed) ? 1 : next_seed;
}

// This class saves the values of all Google Test flags in its c'tor, and
// restores them in its d'tor. RAII guard used by Google Test's own tests
// so that flag mutations do not leak between tests.
class GTestFlagSaver {
 public:
  // The c'tor: snapshot every GTEST_FLAG value.
  GTestFlagSaver() {
    also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests);
    break_on_failure_ = GTEST_FLAG(break_on_failure);
    catch_exceptions_ = GTEST_FLAG(catch_exceptions);
    color_ = GTEST_FLAG(color);
    death_test_style_ = GTEST_FLAG(death_test_style);
    death_test_use_fork_ = GTEST_FLAG(death_test_use_fork);
    fail_fast_ = GTEST_FLAG(fail_fast);
    filter_ = GTEST_FLAG(filter);
    internal_run_death_test_ = GTEST_FLAG(internal_run_death_test);
    list_tests_ = GTEST_FLAG(list_tests);
    output_ = GTEST_FLAG(output);
    brief_ = GTEST_FLAG(brief);
    print_time_ = GTEST_FLAG(print_time);
    print_utf8_ = GTEST_FLAG(print_utf8);
    random_seed_ = GTEST_FLAG(random_seed);
    repeat_ = GTEST_FLAG(repeat);
    shuffle_ = GTEST_FLAG(shuffle);
    stack_trace_depth_ = GTEST_FLAG(stack_trace_depth);
    stream_result_to_ = GTEST_FLAG(stream_result_to);
    throw_on_failure_ = GTEST_FLAG(throw_on_failure);
  }

  // The d'tor is not virtual.  DO NOT INHERIT FROM THIS CLASS.
  // Restores every flag saved by the constructor.
  ~GTestFlagSaver() {
    GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_;
    GTEST_FLAG(break_on_failure) = break_on_failure_;
    GTEST_FLAG(catch_exceptions) = catch_exceptions_;
    GTEST_FLAG(color) = color_;
    GTEST_FLAG(death_test_style) = death_test_style_;
    GTEST_FLAG(death_test_use_fork) = death_test_use_fork_;
    GTEST_FLAG(filter) = filter_;
    GTEST_FLAG(fail_fast) = fail_fast_;
    GTEST_FLAG(internal_run_death_test) = internal_run_death_test_;
    GTEST_FLAG(list_tests) = list_tests_;
    GTEST_FLAG(output) = output_;
    GTEST_FLAG(brief) = brief_;
    GTEST_FLAG(print_time) = print_time_;
    GTEST_FLAG(print_utf8) = print_utf8_;
    GTEST_FLAG(random_seed) = random_seed_;
    GTEST_FLAG(repeat) = repeat_;
    GTEST_FLAG(shuffle) = shuffle_;
    GTEST_FLAG(stack_trace_depth) = stack_trace_depth_;
    GTEST_FLAG(stream_result_to) = stream_result_to_;
    GTEST_FLAG(throw_on_failure) = throw_on_failure_;
  }

 private:
  // Fields for saving the original values of flags.
  bool also_run_disabled_tests_;
  bool break_on_failure_;
  bool catch_exceptions_;
  std::string color_;
  std::string death_test_style_;
  bool death_test_use_fork_;
  bool fail_fast_;
  std::string filter_;
  std::string internal_run_death_test_;
  bool list_tests_;
  std::string output_;
  bool brief_;
  bool print_time_;
  bool print_utf8_;
  int32_t random_seed_;
  int32_t repeat_;
  bool shuffle_;
  int32_t stack_trace_depth_;
  std::string stream_result_to_;
  bool throw_on_failure_;
} GTEST_ATTRIBUTE_UNUSED_;

// Converts a Unicode code point to a narrow string in UTF-8 encoding.
// code_point parameter is of type UInt32 because wchar_t may not be
// wide enough to contain a code point.
// If the code_point is not a valid Unicode code point
// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted
// to "(Invalid Unicode 0xXXXXXXXX)".
GTEST_API_ std::string CodePointToUtf8(uint32_t code_point);

// Converts a wide string to a narrow string in UTF-8 encoding.
// The wide string is assumed to have the following encoding: // UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin) // UTF-32 if sizeof(wchar_t) == 4 (on Linux) // Parameter str points to a null-terminated wide string. // Parameter num_chars may additionally limit the number // of wchar_t characters processed. -1 is used when the entire string // should be processed. // If the string contains code points that are not valid Unicode code points // (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output // as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding // and contains invalid UTF-16 surrogate pairs, values in those pairs // will be encoded as individual Unicode characters from Basic Normal Plane. GTEST_API_ std::string WideStringToUtf8(const wchar_t* str, int num_chars); // Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file // if the variable is present. If a file already exists at this location, this // function will write over it. If the variable is present, but the file cannot // be created, prints an error and exits. void WriteToShardStatusFileIfNeeded(); // Checks whether sharding is enabled by examining the relevant // environment variable values. If the variables are present, // but inconsistent (e.g., shard_index >= total_shards), prints // an error and exits. If in_subprocess_for_death_test, sharding is // disabled because it must only be applied to the original test // process. Otherwise, we could filter out death tests we intended to execute. GTEST_API_ bool ShouldShard(const char* total_shards_str, const char* shard_index_str, bool in_subprocess_for_death_test); // Parses the environment variable var as a 32-bit integer. If it is unset, // returns default_val. If it is not a 32-bit integer, prints an error and // and aborts. 
GTEST_API_ int32_t Int32FromEnvOrDie(const char* env_var, int32_t default_val); // Given the total number of shards, the shard index, and the test id, // returns true if and only if the test should be run on this shard. The test id // is some arbitrary but unique non-negative integer assigned to each test // method. Assumes that 0 <= shard_index < total_shards. GTEST_API_ bool ShouldRunTestOnShard( int total_shards, int shard_index, int test_id); // STL container utilities. // Returns the number of elements in the given container that satisfy // the given predicate. template <class Container, typename Predicate> inline int CountIf(const Container& c, Predicate predicate) { // Implemented as an explicit loop since std::count_if() in libCstd on // Solaris has a non-standard signature. int count = 0; for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) { if (predicate(*it)) ++count; } return count; } // Applies a function/functor to each element in the container. template <class Container, typename Functor> void ForEach(const Container& c, Functor functor) { std::for_each(c.begin(), c.end(), functor); } // Returns the i-th element of the vector, or default_value if i is not // in range [0, v.size()). template <typename E> inline E GetElementOr(const std::vector<E>& v, int i, E default_value) { return (i < 0 || i >= static_cast<int>(v.size())) ? default_value : v[static_cast<size_t>(i)]; } // Performs an in-place shuffle of a range of the vector's elements. // 'begin' and 'end' are element indices as an STL-style range; // i.e. [begin, end) are shuffled, where 'end' == size() means to // shuffle to the end of the vector. 
// Performs an in-place Fisher-Yates shuffle of v's elements in the
// index range [begin, end); 'end' == size() means shuffle to the end of
// the vector. Aborts (via GTEST_CHECK_) on an invalid range.
template <typename E>
void ShuffleRange(internal::Random* random, int begin, int end,
                  std::vector<E>* v) {
  const int size = static_cast<int>(v->size());
  GTEST_CHECK_(0 <= begin && begin <= size)
      << "Invalid shuffle range start " << begin << ": must be in range [0, "
      << size << "].";
  GTEST_CHECK_(begin <= end && end <= size)
      << "Invalid shuffle range finish " << end << ": must be in range ["
      << begin << ", " << size << "].";

  // Fisher-Yates shuffle, from
  // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle
  for (int range_width = end - begin; range_width >= 2; range_width--) {
    const int last_in_range = begin + range_width - 1;
    const int selected =
        begin +
        static_cast<int>(random->Generate(static_cast<uint32_t>(range_width)));
    std::swap((*v)[static_cast<size_t>(selected)],
              (*v)[static_cast<size_t>(last_in_range)]);
  }
}

// Performs an in-place shuffle of the vector's elements.
template <typename E>
inline void Shuffle(internal::Random* random, std::vector<E>* v) {
  ShuffleRange(random, 0, static_cast<int>(v->size()), v);
}

// A function for deleting an object.  Handy for being used as a
// functor (e.g. with ForEach over a container of owned pointers).
template <typename T>
static void Delete(T* x) {
  delete x;
}

// A predicate that checks the key of a TestProperty against a known key.
//
// TestPropertyKeyIs is copyable.
class TestPropertyKeyIs {
 public:
  // Constructor.
  //
  // TestPropertyKeyIs has NO default constructor.
  explicit TestPropertyKeyIs(const std::string& key) : key_(key) {}

  // Returns true if and only if the test name of test property matches on
  // key_.
  bool operator()(const TestProperty& test_property) const {
    return test_property.key() == key_;
  }

 private:
  std::string key_;
};

// Class UnitTestOptions.
//
// This class contains functions for processing options the user
// specifies when running the tests.  It has only static members.
//
// In most cases, the user can specify an option using either an
// environment variable or a command line flag.  E.g.
you can set the // test filter using either GTEST_FILTER or --gtest_filter. If both // the variable and the flag are present, the latter overrides the // former. class GTEST_API_ UnitTestOptions { public: // Functions for processing the gtest_output flag. // Returns the output format, or "" for normal printed output. static std::string GetOutputFormat(); // Returns the absolute path of the requested output file, or the // default (test_detail.xml in the original working directory) if // none was explicitly specified. static std::string GetAbsolutePathToOutputFile(); // Functions for processing the gtest_filter flag. // Returns true if and only if the user-specified filter matches the test // suite name and the test name. static bool FilterMatchesTest(const std::string& test_suite_name, const std::string& test_name); #if GTEST_OS_WINDOWS // Function for supporting the gtest_catch_exception flag. // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise. // This function is useful as an __except condition. static int GTestShouldProcessSEH(DWORD exception_code); #endif // GTEST_OS_WINDOWS // Returns true if "name" matches the ':' separated list of glob-style // filters in "filter". static bool MatchesFilter(const std::string& name, const char* filter); }; // Returns the current application's name, removing directory path if that // is present. Used by UnitTestOptions::GetOutputFile. GTEST_API_ FilePath GetCurrentExecutableName(); // The role interface for getting the OS stack trace as a string. class OsStackTraceGetterInterface { public: OsStackTraceGetterInterface() {} virtual ~OsStackTraceGetterInterface() {} // Returns the current OS stack trace as an std::string. Parameters: // // max_depth - the maximum number of stack frames to be included // in the trace. // skip_count - the number of top frames to be skipped; doesn't count // against max_depth. 
virtual std::string CurrentStackTrace(int max_depth, int skip_count) = 0; // UponLeavingGTest() should be called immediately before Google Test calls // user code. It saves some information about the current stack that // CurrentStackTrace() will use to find and hide Google Test stack frames. virtual void UponLeavingGTest() = 0; // This string is inserted in place of stack frames that are part of // Google Test's implementation. static const char* const kElidedFramesMarker; private: GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface); }; // A working implementation of the OsStackTraceGetterInterface interface. class OsStackTraceGetter : public OsStackTraceGetterInterface { public: OsStackTraceGetter() {} std::string CurrentStackTrace(int max_depth, int skip_count) override; void UponLeavingGTest() override; private: #if GTEST_HAS_ABSL Mutex mutex_; // Protects all internal state. // We save the stack frame below the frame that calls user code. // We do this because the address of the frame immediately below // the user code changes between the call to UponLeavingGTest() // and any calls to the stack trace code from within the user code. void* caller_frame_ = nullptr; #endif // GTEST_HAS_ABSL GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter); }; // Information about a Google Test trace point. struct TraceInfo { const char* file; int line; std::string message; }; // This is the default global test part result reporter used in UnitTestImpl. // This class should only be used by UnitTestImpl. class DefaultGlobalTestPartResultReporter : public TestPartResultReporterInterface { public: explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test); // Implements the TestPartResultReporterInterface. Reports the test part // result in the current test. 
void ReportTestPartResult(const TestPartResult& result) override; private: UnitTestImpl* const unit_test_; GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter); }; // This is the default per thread test part result reporter used in // UnitTestImpl. This class should only be used by UnitTestImpl. class DefaultPerThreadTestPartResultReporter : public TestPartResultReporterInterface { public: explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test); // Implements the TestPartResultReporterInterface. The implementation just // delegates to the current global test part result reporter of *unit_test_. void ReportTestPartResult(const TestPartResult& result) override; private: UnitTestImpl* const unit_test_; GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter); }; // The private implementation of the UnitTest class. We don't protect // the methods under a mutex, as this class is not accessible by a // user and the UnitTest class that delegates work to this class does // proper locking. class GTEST_API_ UnitTestImpl { public: explicit UnitTestImpl(UnitTest* parent); virtual ~UnitTestImpl(); // There are two different ways to register your own TestPartResultReporter. // You can register your own repoter to listen either only for test results // from the current thread or for results from all threads. // By default, each per-thread test result repoter just passes a new // TestPartResult to the global test result reporter, which registers the // test part result for the currently running test. // Returns the global test part result reporter. TestPartResultReporterInterface* GetGlobalTestPartResultReporter(); // Sets the global test part result reporter. void SetGlobalTestPartResultReporter( TestPartResultReporterInterface* reporter); // Returns the test part result reporter for the current thread. 
TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread(); // Sets the test part result reporter for the current thread. void SetTestPartResultReporterForCurrentThread( TestPartResultReporterInterface* reporter); // Gets the number of successful test suites. int successful_test_suite_count() const; // Gets the number of failed test suites. int failed_test_suite_count() const; // Gets the number of all test suites. int total_test_suite_count() const; // Gets the number of all test suites that contain at least one test // that should run. int test_suite_to_run_count() const; // Gets the number of successful tests. int successful_test_count() const; // Gets the number of skipped tests. int skipped_test_count() const; // Gets the number of failed tests. int failed_test_count() const; // Gets the number of disabled tests that will be reported in the XML report. int reportable_disabled_test_count() const; // Gets the number of disabled tests. int disabled_test_count() const; // Gets the number of tests to be printed in the XML report. int reportable_test_count() const; // Gets the number of all tests. int total_test_count() const; // Gets the number of tests that should run. int test_to_run_count() const; // Gets the time of the test program start, in ms from the start of the // UNIX epoch. TimeInMillis start_timestamp() const { return start_timestamp_; } // Gets the elapsed time, in milliseconds. TimeInMillis elapsed_time() const { return elapsed_time_; } // Returns true if and only if the unit test passed (i.e. all test suites // passed). bool Passed() const { return !Failed(); } // Returns true if and only if the unit test failed (i.e. some test suite // failed or something outside of all tests failed). bool Failed() const { return failed_test_suite_count() > 0 || ad_hoc_test_result()->Failed(); } // Gets the i-th test suite among all the test suites. i can range from 0 to // total_test_suite_count() - 1. If i is not in that range, returns NULL. 
const TestSuite* GetTestSuite(int i) const { const int index = GetElementOr(test_suite_indices_, i, -1); return index < 0 ? nullptr : test_suites_[static_cast<size_t>(i)]; } // Legacy API is deprecated but still available #ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ const TestCase* GetTestCase(int i) const { return GetTestSuite(i); } #endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ // Gets the i-th test suite among all the test suites. i can range from 0 to // total_test_suite_count() - 1. If i is not in that range, returns NULL. TestSuite* GetMutableSuiteCase(int i) { const int index = GetElementOr(test_suite_indices_, i, -1); return index < 0 ? nullptr : test_suites_[static_cast<size_t>(index)]; } // Provides access to the event listener list. TestEventListeners* listeners() { return &listeners_; } // Returns the TestResult for the test that's currently running, or // the TestResult for the ad hoc test if no test is running. TestResult* current_test_result(); // Returns the TestResult for the ad hoc test. const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; } // Sets the OS stack trace getter. // // Does nothing if the input and the current OS stack trace getter // are the same; otherwise, deletes the old getter and makes the // input the current getter. void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter); // Returns the current OS stack trace getter if it is not NULL; // otherwise, creates an OsStackTraceGetter, makes it the current // getter, and returns it. OsStackTraceGetterInterface* os_stack_trace_getter(); // Returns the current OS stack trace as an std::string. // // The maximum number of stack frames to be included is specified by // the gtest_stack_trace_depth flag. The skip_count parameter // specifies the number of top frames to be skipped, which doesn't // count against the number of frames to be included. 
// // For example, if Foo() calls Bar(), which in turn calls // CurrentOsStackTraceExceptTop(1), Foo() will be included in the // trace but Bar() and CurrentOsStackTraceExceptTop() won't. std::string CurrentOsStackTraceExceptTop(int skip_count) GTEST_NO_INLINE_; // Finds and returns a TestSuite with the given name. If one doesn't // exist, creates one and returns it. // // Arguments: // // test_suite_name: name of the test suite // type_param: the name of the test's type parameter, or NULL if // this is not a typed or a type-parameterized test. // set_up_tc: pointer to the function that sets up the test suite // tear_down_tc: pointer to the function that tears down the test suite TestSuite* GetTestSuite(const char* test_suite_name, const char* type_param, internal::SetUpTestSuiteFunc set_up_tc, internal::TearDownTestSuiteFunc tear_down_tc); // Legacy API is deprecated but still available #ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ TestCase* GetTestCase(const char* test_case_name, const char* type_param, internal::SetUpTestSuiteFunc set_up_tc, internal::TearDownTestSuiteFunc tear_down_tc) { return GetTestSuite(test_case_name, type_param, set_up_tc, tear_down_tc); } #endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ // Adds a TestInfo to the unit test. // // Arguments: // // set_up_tc: pointer to the function that sets up the test suite // tear_down_tc: pointer to the function that tears down the test suite // test_info: the TestInfo object void AddTestInfo(internal::SetUpTestSuiteFunc set_up_tc, internal::TearDownTestSuiteFunc tear_down_tc, TestInfo* test_info) { #if GTEST_HAS_DEATH_TEST // In order to support thread-safe death tests, we need to // remember the original working directory when the test program // was first invoked. We cannot do this in RUN_ALL_TESTS(), as // the user may have changed the current directory before calling // RUN_ALL_TESTS(). 
Therefore we capture the current directory in // AddTestInfo(), which is called to register a TEST or TEST_F // before main() is reached. if (original_working_dir_.IsEmpty()) { original_working_dir_.Set(FilePath::GetCurrentDir()); GTEST_CHECK_(!original_working_dir_.IsEmpty()) << "Failed to get the current working directory."; } #endif // GTEST_HAS_DEATH_TEST GetTestSuite(test_info->test_suite_name(), test_info->type_param(), set_up_tc, tear_down_tc) ->AddTestInfo(test_info); } // Returns ParameterizedTestSuiteRegistry object used to keep track of // value-parameterized tests and instantiate and register them. internal::ParameterizedTestSuiteRegistry& parameterized_test_registry() { return parameterized_test_registry_; } std::set<std::string>* ignored_parameterized_test_suites() { return &ignored_parameterized_test_suites_; } // Returns TypeParameterizedTestSuiteRegistry object used to keep track of // type-parameterized tests and instantiations of them. internal::TypeParameterizedTestSuiteRegistry& type_parameterized_test_registry() { return type_parameterized_test_registry_; } // Sets the TestSuite object for the test that's currently running. void set_current_test_suite(TestSuite* a_current_test_suite) { current_test_suite_ = a_current_test_suite; } // Sets the TestInfo object for the test that's currently running. If // current_test_info is NULL, the assertion results will be stored in // ad_hoc_test_result_. void set_current_test_info(TestInfo* a_current_test_info) { current_test_info_ = a_current_test_info; } // Registers all parameterized tests defined using TEST_P and // INSTANTIATE_TEST_SUITE_P, creating regular tests for each test/parameter // combination. This method can be called more then once; it has guards // protecting from registering the tests more then once. If // value-parameterized tests are disabled, RegisterParameterizedTests is // present but does nothing. 
void RegisterParameterizedTests(); // Runs all tests in this UnitTest object, prints the result, and // returns true if all tests are successful. If any exception is // thrown during a test, this test is considered to be failed, but // the rest of the tests will still be run. bool RunAllTests(); // Clears the results of all tests, except the ad hoc tests. void ClearNonAdHocTestResult() { ForEach(test_suites_, TestSuite::ClearTestSuiteResult); } // Clears the results of ad-hoc test assertions. void ClearAdHocTestResult() { ad_hoc_test_result_.Clear(); } // Adds a TestProperty to the current TestResult object when invoked in a // context of a test or a test suite, or to the global property set. If the // result already contains a property with the same key, the value will be // updated. void RecordProperty(const TestProperty& test_property); enum ReactionToSharding { HONOR_SHARDING_PROTOCOL, IGNORE_SHARDING_PROTOCOL }; // Matches the full name of each test against the user-specified // filter to decide whether the test should run, then records the // result in each TestSuite and TestInfo object. // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests // based on sharding variables in the environment. // Returns the number of tests that should run. int FilterTests(ReactionToSharding shard_tests); // Prints the names of the tests matching the user-specified filter flag. void ListTestsMatchingFilter(); const TestSuite* current_test_suite() const { return current_test_suite_; } TestInfo* current_test_info() { return current_test_info_; } const TestInfo* current_test_info() const { return current_test_info_; } // Returns the vector of environments that need to be set-up/torn-down // before/after the tests are run. std::vector<Environment*>& environments() { return environments_; } // Getters for the per-thread Google Test trace stack. 
std::vector<TraceInfo>& gtest_trace_stack() { return *(gtest_trace_stack_.pointer()); } const std::vector<TraceInfo>& gtest_trace_stack() const { return gtest_trace_stack_.get(); } #if GTEST_HAS_DEATH_TEST void InitDeathTestSubprocessControlInfo() { internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag()); } // Returns a pointer to the parsed --gtest_internal_run_death_test // flag, or NULL if that flag was not specified. // This information is useful only in a death test child process. // Must not be called before a call to InitGoogleTest. const InternalRunDeathTestFlag* internal_run_death_test_flag() const { return internal_run_death_test_flag_.get(); } // Returns a pointer to the current death test factory. internal::DeathTestFactory* death_test_factory() { return death_test_factory_.get(); } void SuppressTestEventsIfInSubprocess(); friend class ReplaceDeathTestFactory; #endif // GTEST_HAS_DEATH_TEST // Initializes the event listener performing XML output as specified by // UnitTestOptions. Must not be called before InitGoogleTest. void ConfigureXmlOutput(); #if GTEST_CAN_STREAM_RESULTS_ // Initializes the event listener for streaming test results to a socket. // Must not be called before InitGoogleTest. void ConfigureStreamingOutput(); #endif // Performs initialization dependent upon flag values obtained in // ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to // ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest // this function is also called from RunAllTests. Since this function can be // called more than once, it has to be idempotent. void PostFlagParsingInit(); // Gets the random seed used at the start of the current test iteration. int random_seed() const { return random_seed_; } // Gets the random number generator. internal::Random* random() { return &random_; } // Shuffles all test suites, and the tests within each test suite, // making sure that death tests are still run first. 
void ShuffleTests(); // Restores the test suites and tests to their order before the first shuffle. void UnshuffleTests(); // Returns the value of GTEST_FLAG(catch_exceptions) at the moment // UnitTest::Run() starts. bool catch_exceptions() const { return catch_exceptions_; } private: friend class ::testing::UnitTest; // Used by UnitTest::Run() to capture the state of // GTEST_FLAG(catch_exceptions) at the moment it starts. void set_catch_exceptions(bool value) { catch_exceptions_ = value; } // The UnitTest object that owns this implementation object. UnitTest* const parent_; // The working directory when the first TEST() or TEST_F() was // executed. internal::FilePath original_working_dir_; // The default test part result reporters. DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_; DefaultPerThreadTestPartResultReporter default_per_thread_test_part_result_reporter_; // Points to (but doesn't own) the global test part result reporter. TestPartResultReporterInterface* global_test_part_result_repoter_; // Protects read and write access to global_test_part_result_reporter_. internal::Mutex global_test_part_result_reporter_mutex_; // Points to (but doesn't own) the per-thread test part result reporter. internal::ThreadLocal<TestPartResultReporterInterface*> per_thread_test_part_result_reporter_; // The vector of environments that need to be set-up/torn-down // before/after the tests are run. std::vector<Environment*> environments_; // The vector of TestSuites in their original order. It owns the // elements in the vector. std::vector<TestSuite*> test_suites_; // Provides a level of indirection for the test suite list to allow // easy shuffling and restoring the test suite order. The i-th // element of this vector is the index of the i-th test suite in the // shuffled order. std::vector<int> test_suite_indices_; // ParameterizedTestRegistry object used to register value-parameterized // tests. 
internal::ParameterizedTestSuiteRegistry parameterized_test_registry_; internal::TypeParameterizedTestSuiteRegistry type_parameterized_test_registry_; // The set holding the name of parameterized // test suites that may go uninstantiated. std::set<std::string> ignored_parameterized_test_suites_; // Indicates whether RegisterParameterizedTests() has been called already. bool parameterized_tests_registered_; // Index of the last death test suite registered. Initially -1. int last_death_test_suite_; // This points to the TestSuite for the currently running test. It // changes as Google Test goes through one test suite after another. // When no test is running, this is set to NULL and Google Test // stores assertion results in ad_hoc_test_result_. Initially NULL. TestSuite* current_test_suite_; // This points to the TestInfo for the currently running test. It // changes as Google Test goes through one test after another. When // no test is running, this is set to NULL and Google Test stores // assertion results in ad_hoc_test_result_. Initially NULL. TestInfo* current_test_info_; // Normally, a user only writes assertions inside a TEST or TEST_F, // or inside a function called by a TEST or TEST_F. Since Google // Test keeps track of which test is current running, it can // associate such an assertion with the test it belongs to. // // If an assertion is encountered when no TEST or TEST_F is running, // Google Test attributes the assertion result to an imaginary "ad hoc" // test, and records the result in ad_hoc_test_result_. TestResult ad_hoc_test_result_; // The list of event listeners that can be used to track events inside // Google Test. TestEventListeners listeners_; // The OS stack trace getter. Will be deleted when the UnitTest // object is destructed. By default, an OsStackTraceGetter is used, // but the user can set this field to use a custom getter if that is // desired. 
OsStackTraceGetterInterface* os_stack_trace_getter_; // True if and only if PostFlagParsingInit() has been called. bool post_flag_parse_init_performed_; // The random number seed used at the beginning of the test run. int random_seed_; // Our random number generator. internal::Random random_; // The time of the test program start, in ms from the start of the // UNIX epoch. TimeInMillis start_timestamp_; // How long the test took to run, in milliseconds. TimeInMillis elapsed_time_; #if GTEST_HAS_DEATH_TEST // The decomposed components of the gtest_internal_run_death_test flag, // parsed when RUN_ALL_TESTS is called. std::unique_ptr<InternalRunDeathTestFlag> internal_run_death_test_flag_; std::unique_ptr<internal::DeathTestFactory> death_test_factory_; #endif // GTEST_HAS_DEATH_TEST // A per-thread stack of traces created by the SCOPED_TRACE() macro. internal::ThreadLocal<std::vector<TraceInfo> > gtest_trace_stack_; // The value of GTEST_FLAG(catch_exceptions) at the moment RunAllTests() // starts. bool catch_exceptions_; GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl); }; // class UnitTestImpl // Convenience function for accessing the global UnitTest // implementation object. inline UnitTestImpl* GetUnitTestImpl() { return UnitTest::GetInstance()->impl(); } #if GTEST_USES_SIMPLE_RE // Internal helper functions for implementing the simple regular // expression matcher. 
GTEST_API_ bool IsInSet(char ch, const char* str); GTEST_API_ bool IsAsciiDigit(char ch); GTEST_API_ bool IsAsciiPunct(char ch); GTEST_API_ bool IsRepeat(char ch); GTEST_API_ bool IsAsciiWhiteSpace(char ch); GTEST_API_ bool IsAsciiWordChar(char ch); GTEST_API_ bool IsValidEscape(char ch); GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch); GTEST_API_ bool ValidateRegex(const char* regex); GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str); GTEST_API_ bool MatchRepetitionAndRegexAtHead( bool escaped, char ch, char repeat, const char* regex, const char* str); GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str); #endif // GTEST_USES_SIMPLE_RE // Parses the command line for Google Test flags, without initializing // other parts of Google Test. GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv); GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv); #if GTEST_HAS_DEATH_TEST // Returns the message describing the last system error, regardless of the // platform. GTEST_API_ std::string GetLastErrnoDescription(); // Attempts to parse a string into a positive integer pointed to by the // number parameter. Returns true if that is possible. // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use // it here. template <typename Integer> bool ParseNaturalNumber(const ::std::string& str, Integer* number) { // Fail fast if the given string does not begin with a digit; // this bypasses strtoXXX's "optional leading whitespace and plus // or minus sign" semantics, which are undesirable here. if (str.empty() || !IsDigit(str[0])) { return false; } errno = 0; char* end; // BiggestConvertible is the largest integer type that system-provided // string-to-number conversion routines can return. 
using BiggestConvertible = unsigned long long; // NOLINT const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10); // NOLINT const bool parse_success = *end == '\0' && errno == 0; GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed)); const Integer result = static_cast<Integer>(parsed); if (parse_success && static_cast<BiggestConvertible>(result) == parsed) { *number = result; return true; } return false; } #endif // GTEST_HAS_DEATH_TEST // TestResult contains some private methods that should be hidden from // Google Test user but are required for testing. This class allow our tests // to access them. // // This class is supplied only for the purpose of testing Google Test's own // constructs. Do not use it in user tests, either directly or indirectly. class TestResultAccessor { public: static void RecordProperty(TestResult* test_result, const std::string& xml_element, const TestProperty& property) { test_result->RecordProperty(xml_element, property); } static void ClearTestPartResults(TestResult* test_result) { test_result->ClearTestPartResults(); } static const std::vector<testing::TestPartResult>& test_part_results( const TestResult& test_result) { return test_result.test_part_results(); } }; #if GTEST_CAN_STREAM_RESULTS_ // Streams test results to the given port on the given host machine. class StreamingListener : public EmptyTestEventListener { public: // Abstract base class for writing strings to a socket. class AbstractSocketWriter { public: virtual ~AbstractSocketWriter() {} // Sends a string to the socket. virtual void Send(const std::string& message) = 0; // Closes the socket. virtual void CloseConnection() {} // Sends a string and a newline to the socket. void SendLn(const std::string& message) { Send(message + "\n"); } }; // Concrete class for actually writing strings to a socket. 
class SocketWriter : public AbstractSocketWriter { public: SocketWriter(const std::string& host, const std::string& port) : sockfd_(-1), host_name_(host), port_num_(port) { MakeConnection(); } ~SocketWriter() override { if (sockfd_ != -1) CloseConnection(); } // Sends a string to the socket. void Send(const std::string& message) override { GTEST_CHECK_(sockfd_ != -1) << "Send() can be called only when there is a connection."; const auto len = static_cast<size_t>(message.length()); if (write(sockfd_, message.c_str(), len) != static_cast<ssize_t>(len)) { GTEST_LOG_(WARNING) << "stream_result_to: failed to stream to " << host_name_ << ":" << port_num_; } } private: // Creates a client socket and connects to the server. void MakeConnection(); // Closes the socket. void CloseConnection() override { GTEST_CHECK_(sockfd_ != -1) << "CloseConnection() can be called only when there is a connection."; close(sockfd_); sockfd_ = -1; } int sockfd_; // socket file descriptor const std::string host_name_; const std::string port_num_; GTEST_DISALLOW_COPY_AND_ASSIGN_(SocketWriter); }; // class SocketWriter // Escapes '=', '&', '%', and '\n' characters in str as "%xx". static std::string UrlEncode(const char* str); StreamingListener(const std::string& host, const std::string& port) : socket_writer_(new SocketWriter(host, port)) { Start(); } explicit StreamingListener(AbstractSocketWriter* socket_writer) : socket_writer_(socket_writer) { Start(); } void OnTestProgramStart(const UnitTest& /* unit_test */) override { SendLn("event=TestProgramStart"); } void OnTestProgramEnd(const UnitTest& unit_test) override { // Note that Google Test current only report elapsed time for each // test iteration, not for the entire test program. SendLn("event=TestProgramEnd&passed=" + FormatBool(unit_test.Passed())); // Notify the streaming server to stop. 
socket_writer_->CloseConnection(); } void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) override { SendLn("event=TestIterationStart&iteration=" + StreamableToString(iteration)); } void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) override { SendLn("event=TestIterationEnd&passed=" + FormatBool(unit_test.Passed()) + "&elapsed_time=" + StreamableToString(unit_test.elapsed_time()) + "ms"); } // Note that "event=TestCaseStart" is a wire format and has to remain // "case" for compatibility void OnTestCaseStart(const TestCase& test_case) override { SendLn(std::string("event=TestCaseStart&name=") + test_case.name()); } // Note that "event=TestCaseEnd" is a wire format and has to remain // "case" for compatibility void OnTestCaseEnd(const TestCase& test_case) override { SendLn("event=TestCaseEnd&passed=" + FormatBool(test_case.Passed()) + "&elapsed_time=" + StreamableToString(test_case.elapsed_time()) + "ms"); } void OnTestStart(const TestInfo& test_info) override { SendLn(std::string("event=TestStart&name=") + test_info.name()); } void OnTestEnd(const TestInfo& test_info) override { SendLn("event=TestEnd&passed=" + FormatBool((test_info.result())->Passed()) + "&elapsed_time=" + StreamableToString((test_info.result())->elapsed_time()) + "ms"); } void OnTestPartResult(const TestPartResult& test_part_result) override { const char* file_name = test_part_result.file_name(); if (file_name == nullptr) file_name = ""; SendLn("event=TestPartResult&file=" + UrlEncode(file_name) + "&line=" + StreamableToString(test_part_result.line_number()) + "&message=" + UrlEncode(test_part_result.message())); } private: // Sends the given message and a newline to the socket. void SendLn(const std::string& message) { socket_writer_->SendLn(message); } // Called at the start of streaming to notify the receiver what // protocol we are using. 
void Start() { SendLn("gtest_streaming_protocol_version=1.0"); } std::string FormatBool(bool value) { return value ? "1" : "0"; } const std::unique_ptr<AbstractSocketWriter> socket_writer_; GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener); }; // class StreamingListener #endif // GTEST_CAN_STREAM_RESULTS_ } // namespace internal } // namespace testing GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 #endif // GOOGLETEST_SRC_GTEST_INTERNAL_INL_H_
contract_delegate_storage.mli
(** This module deals with the delegates of a contract. It is responsible for maintaining the tables {!Storage.Contract.Delegate} and {!Storage.Contract.Delegated}. *) type error += | (* `Permanent *) Forbidden_tz4_delegate of Bls.Public_key_hash.t (** Delegates cannot be tz4 accounts (i.e. BLS public key hashes). This error is returned when we try to register such a delegate. *) (** [check_not_tz4 pkh] checks that [pkh] is not a BLS address. *) val check_not_tz4 : Signature.public_key_hash -> unit tzresult (** [find ctxt contract] returns the delegate associated to [contract], or [None] if [contract] has no delegate. *) val find : Raw_context.t -> Contract_repr.t -> Signature.Public_key_hash.t option tzresult Lwt.t (** [init ctxt contract delegate] sets the [delegate] associated to [contract]. This function assumes that [contract] does not have a delegate already. *) val init : Raw_context.t -> Contract_repr.t -> Signature.Public_key_hash.t -> Raw_context.t tzresult Lwt.t (** [unlink ctxt contract] removes [contract] from the list of contracts that delegated to [find ctxt contract], i.e. the output of [delegated_contracts]. This function does not affect the value of the expression [find ctxt contract]. This function assumes that [contract] is allocated. *) val unlink : Raw_context.t -> Contract_repr.t -> Raw_context.t tzresult Lwt.t (** [delete ctxt contract] behaves as [unlink ctxt contract], but in addition removes the association of the [contract] to its current delegate, leaving the former without delegate. This function assumes that [contract] is allocated. *) val delete : Raw_context.t -> Contract_repr.t -> Raw_context.t tzresult Lwt.t (** [set ctxt contract delegate] updates the [delegate] associated to [contract]. This function assumes that [contract] is allocated and has a delegate. 
*) val set : Raw_context.t -> Contract_repr.t -> Signature.Public_key_hash.t -> Raw_context.t tzresult Lwt.t (** [delegated_contracts ctxt delegate] returns the list of contracts (implicit or originated) that delegated to [delegate]. *) val delegated_contracts : Raw_context.t -> Signature.Public_key_hash.t -> Contract_repr.t list Lwt.t
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2021 Nomadic Labs, <contact@nomadic-labs.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
ko.c
void main() { char __tmp_0__ = 0; while(1) { switch(2) case 0: case 1: case 2: case 3: case 4: __tmp_0__ = 1 != 0; break; __tmp_0__ = 0; } }
symplectic_generic.ml
open Types module Make (M : Owl_types_ndarray_algodiff.Sig with type elt = float) = struct module C = Common.Make (M) type f_t = M.arr * M.arr -> float -> M.arr module M = struct include M (* TODO: implement this in owl *) let ( *$ ) = M.mul_scalar let ( + ) = M.add end let prepare step f (x0, p0) tspec () = let tspan, dt = match tspec with | T1 { t0; duration; dt } -> (t0, t0 +. duration), dt | T2 { tspan; dt } -> tspan, dt | T3 _ -> raise Owl_exception.(NOT_IMPLEMENTED "T3 not implemented") in let step = step f ~dt in C.symplectic_integrate ~step ~tspan ~dt (x0, p0) let symplectic_euler_s (f : f_t) ~dt (xs, ps) t0 = let t = t0 +. dt in let fxs = f (xs, ps) t in let ps' = M.(ps + (fxs *$ dt)) in let xs' = M.(xs + (ps' *$ dt)) in (xs', ps'), t let symplectic_euler = (module struct type state = M.arr * M.arr type f = M.arr * M.arr -> float -> M.arr type step_output = (M.arr * M.arr) * float type solve_output = M.arr * M.arr * M.arr let step = symplectic_euler_s let solve = prepare step end : Solver with type state = M.arr * M.arr and type f = M.arr * M.arr -> float -> M.arr and type step_output = (M.arr * M.arr) * float and type solve_output = M.arr * M.arr * M.arr) let leapfrog_s (f : f_t) ~dt (xs, ps) t0 = let t = t0 +. dt in let fxs = f (xs, ps) t in let xs' = M.(xs + (ps *$ dt) + (fxs *$ (dt *. dt *. 0.5))) in let fxs' = f (xs', ps) (t +. dt) in let ps' = M.(ps + ((fxs + fxs') *$ (dt *. 
0.5))) in (xs', ps'), t let leapfrog = (module struct type state = M.arr * M.arr type f = M.arr * M.arr -> float -> M.arr type step_output = (M.arr * M.arr) * float type solve_output = M.arr * M.arr * M.arr let step = leapfrog_s let solve = prepare step end : Solver with type state = M.arr * M.arr and type f = M.arr * M.arr -> float -> M.arr and type step_output = (M.arr * M.arr) * float and type solve_output = M.arr * M.arr * M.arr) (* For the values used in the implementations below see Candy-Rozmus (https://www.sciencedirect.com/science/article/pii/002199919190299Z) and https://en.wikipedia.org/wiki/Symplectic_integrator *) let symint ~coeffs (f : f_t) ~dt = let symint_step ~coeffs f (xs, ps) t dt = List.fold_left (fun ((xs, ps), t) (ai, bi) -> let ps' = M.(ps + (f (xs, ps) t *$ (dt *. bi))) in let xs' = M.(xs + (ps' *$ (dt *. ai))) in let t = t +. (dt *. ai) in (xs', ps'), t) ((xs, ps), t) coeffs in fun (xs, ps) t -> symint_step ~coeffs f (xs, ps) t dt let leapfrog_c = [ 0.5, 0.0; 0.5, 1.0 ] let pseudoleapfrog_c = [ 1.0, 0.5; 0.0, 0.5 ] let ruth3_c = [ 2.0 /. 3.0, 7.0 /. 24.0; -2.0 /. 3.0, 0.75; 1.0, -1.0 /. 24.0 ] let ruth4_c = let c = 2.0 ** (1.0 /. 3.0) in [ 0.5, 0.0; 0.5 *. (1.0 -. c), 1.0; 0.5 *. (1.0 -. c), -.c; 0.5, 1.0 ] |> List.map (fun (v1, v2) -> v1 /. (2.0 -. c), v2 /. (2.0 -. 
c)) let _leapfrog_s' f ~dt = symint ~coeffs:leapfrog_c f ~dt let pseudoleapfrog_s f ~dt = symint ~coeffs:pseudoleapfrog_c f ~dt let pseudoleapfrog = (module struct type state = M.arr * M.arr type f = M.arr * M.arr -> float -> M.arr type step_output = (M.arr * M.arr) * float type solve_output = M.arr * M.arr * M.arr let step = pseudoleapfrog_s let solve = prepare step end : Solver with type state = M.arr * M.arr and type f = M.arr * M.arr -> float -> M.arr and type step_output = (M.arr * M.arr) * float and type solve_output = M.arr * M.arr * M.arr) let ruth3_s f ~dt = symint ~coeffs:ruth3_c f ~dt let ruth3 = (module struct type state = M.arr * M.arr type f = M.arr * M.arr -> float -> M.arr type step_output = (M.arr * M.arr) * float type solve_output = M.arr * M.arr * M.arr let step = ruth3_s let solve = prepare step end : Solver with type state = M.arr * M.arr and type f = M.arr * M.arr -> float -> M.arr and type step_output = (M.arr * M.arr) * float and type solve_output = M.arr * M.arr * M.arr) let ruth4_s f ~dt = symint ~coeffs:ruth4_c f ~dt let ruth4 = (module struct type state = M.arr * M.arr type f = M.arr * M.arr -> float -> M.arr type step_output = (M.arr * M.arr) * float type solve_output = M.arr * M.arr * M.arr let step = ruth4_s let solve = prepare step end : Solver with type state = M.arr * M.arr and type f = M.arr * M.arr -> float -> M.arr and type step_output = (M.arr * M.arr) * float and type solve_output = M.arr * M.arr * M.arr) (* (* XXX: We would like to do pint = so.fsolve( lambda pint: p - pint + 0.5*h*acc(x, pint, t0+i*h), p )[0] xnew = x + h*pint pnew = pint + 0.5*h*acc(xnew, pint, t0+(i+1)*h) sol[i+1] = np.array((pnew, xnew)) but http://ocaml.xyz/apidoc/owl_M.arrhs_root.html does not seem powerful enough for that in general. 
*) let leapfrog_implicit ~f y0 (t0, t1) dt = let _, elts = M.shape y0 in assert (M.s.is_even elts); let steps = steps t0 t1 dt in let sol = M.empty steps elts in sol.${[[0]]}<- y0; for idx = 1 to steps-1 do (* TODO *) () done; sol *) (* ----- helper functions ----- *) let to_state_array ?(axis = 0) (dim1, dim2) xs ps = let unpack = if axis = 0 then M.to_rows else if axis = 1 then M.to_cols else raise Owl_exception.INDEX_OUT_OF_BOUND in let xs = unpack xs in let ps = unpack ps in if M.numel xs.(0) <> dim1 * dim2 then raise Owl_exception.(DIFFERENT_SHAPE ([| M.numel xs.(0) |], [| dim1 * dim2 |])); if M.numel ps.(0) <> dim1 * dim2 then raise Owl_exception.(DIFFERENT_SHAPE ([| M.numel ps.(0) |], [| dim1 * dim2 |])); ( Array.map (fun x -> M.reshape x [| dim1; dim2 |]) xs , Array.map (fun p -> M.reshape p [| dim1; dim2 |]) ps ) end
(* * OWL - OCaml Scientific and Engineering Computing * OWL-ODE - Ordinary Differential Equation Solvers * * Copyright (c) 2019 Ta-Chu Kao <tck29@cam.ac.uk> * Copyright (c) 2019 Marcello Seri <m.seri@rug.nl> *)
doc_comments.mli
(** Manpages. See {!Cmdliner.Manpage}. *) type block = [ `S of string | `P of string | `Pre of string | `I of string * string | `Noblank | `Blocks of block list ] include M with type t := t (** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod *) val escape : string -> string (** [escape s] escapes [s] from the doc language. *) type title = string * int * string * string * string (** {1:standard-section-names Standard section names} *) val s_name : string (** {1:section-maps Section maps} Used for handling the merging of metadata doc strings. *) type smap val smap_append_block : smap -> sec:string -> block -> smap (** [smap_append_block smap sec b] appends [b] at the end of section [sec] creating it at the right place if needed. *) (** {1:content-boilerplate Content boilerplate} *) val s_environment_intro : block (** {1:output Output} *) type format = [`Auto | `Pager | `Plain | `Groff] val print : ?errs:Format.formatter -> ?subst:(string -> string option) -> format -> Format.formatter -> t -> unit (** {1:printers-and-escapes-used-by-cmdliner-module Printers and escapes used by Cmdliner module} *) val subst_vars : errs:Format.formatter -> subst:(string -> string option) -> Buffer.t -> string -> string (** [subst b ~subst s], using [b], substitutes in [s] variables of the form "$(doc)" by their [subst] definition. This leaves escapes and markup directives $(markup,...) intact. @raise Invalid_argument in case of illegal syntax. *) val doc_to_plain : errs:Format.formatter -> subst:(string -> string option) -> Buffer.t -> string -> string (** [doc_to_plain b ~subst s] using [b], subsitutes in [s] variables by their [subst] definition and renders cmdliner directives to plain text. @raise Invalid_argument in case of illegal syntax. 
*) val k : k (** this is a comment @author foo @author Foooooooooooooooooooooooooooooooooooo Baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaar @version foo @version Foooooooooooooooooooooooooooooooooooo Baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaar @see <foo> foo @see <https://slash-create.js.org/#/docs/main/latest/class/SlashCreator?scrollTo=registerCommandsIn> this url is very long @since foo @since Foooooooooooooooooooooooooooooooooooo.Baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaar @before foo [foo] @before Foooooooooooooooooooooooooooooooooooo.Baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaar Foo bar @deprecated [foo] @deprecated Foooooooooooooooooooooooooooooooooooo Baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaar Foo bar @param foo [foo] @param Foooooooooooooo_Baaaaaaaaaaaaar Fooooooooooo foooooooooooo fooooooooooo baaaaaaaaar @param Foooooooooooooooooooooooooooooooooooo_baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaar Foo bar @raise foo [foo] @raise Foooooooooooooooooooooooooooooooooooo_baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaar Foo bar @return [foo] @inline @canonical foo @canonical Foooooooooooooooooooooooooooooooooooo.Baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaar *) val x : x (** a comment @version foo *) (** Managing Chunks. This module exposes functors to store raw contents into append-only stores as chunks of same size. It exposes the {{!AO} AO} functor which split the raw contents into [Data] blocks, addressed by [Node] blocks. That's the usual rope-like representation of strings, but chunk trees are always build as perfectly well-balanced and blocks are addressed by their hash (or by the stable keys returned by the underlying store). 
A chunk has the following structure: {v -------------------------- -------------------------- | uint8_t type | | uint8_t type | --------------------------- --------------------------- | uint16_t | | uint64_t | --------------------------- --------------------------- | key children[length] | | byte data[length] | --------------------------- --------------------------- v} [type] is either [Data] (0) or [Index] (1). If the chunk contains data, [length] is the payload length. Otherwise it is the number of children that the node has. It also exposes {{!AO_stable} AO_stable} which -- as {{!AO} AO} does -- stores raw contents into chunks of same size. But it also preserves the nice properpty that values are addressed by their hash. instead of by the hash of the root chunk node as it is the case for {{!AO} AO}. *) (** This is verbatim: {v o o /\ /\ /\ /\ v} This is preformated code: {[ let verbatim s = s |> String.split_lines |> List.map ~f:String.strip |> fun s -> list s "@," Fmt.str ]} *) (** Lists: list with short lines: - x - y - z list with long lines: - xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx - yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy - zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz enumerated list with long lines: + xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx + yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy yyy + zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz zzz list with sub lists: {ul {- xxx - a - b - c } {- yyy + a + b + c }} *) (** {{:https://github.com/} Github} *) (** {:https://github.com/} *) (** An array index offset: [exp1\[exp2\]] *) (** to extend \{foo syntax *) (** The different forms of references in \@see tags. 
*) (** Printf groff string for the \@before information. *) (** [a]'c [b]'s [c]'c *) (** return true if [\gamma(lhs) \subseteq \gamma(rhs)] *) (** Composition of functions: [(f >> g) x] is exactly equivalent to [g (f (x))]. Left associative. *) (** [†] [Struct_rec] is *) (** for [Global]s *) (** generic command: ∀xs.[foot]-[post] *) (** A *) val foo : int -> unit (** B *) (** C *) (** A *) val foo : int -> unit (** B *) module Foo : sig (** A *) val foo : int -> unit (** B *) (** C *) (** A *) val foo : int -> unit (** B *) end (** [\[ \] \[\] \]] *) (** \{ \} \[ \] \@ \@ *) (** @canonical Foo *) (** @canonical Module.Foo.Bar *) (** {v a v} *) (** {[ b ]} *) (** - Odoc don't parse multiple paragraph in a list *) (** {ul {- Abc Def } {- Hij } {- Klm {ul {- Nop Qrs } {- Tuv }} }} *) (** - {v Abc def v} - {[ A B ]} *) (** Code block {[ Single line ]} {[ Multi line ]} {[ Multi line with indentation ]} {[ Single long line HAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA ]} {[ With empty line ]} {[ First line on the same line as opening ]} *) module X : sig (** {[ First line on the same line as opening ]} *) end (** {!module:A} {!module:A.B} {!module-type:A} {!module-type:A.b} {!class:c} {!class:M.c} {!class-type:c} {!class-type:M.c} {!val:x} {!val:M.x} {!type:t} {!type:M.t} {!exception:E} {!exception:M.E} {!method:m} {!method:c.m} {!constructor:C} {!constructor:M.C} {!field:f} {!field:t.f} {!field:M.t.f} *) (** {!modules:Foo} {!modules:Foo Bar.Baz} @canonical Foo @canonical Foo.Bar *) (** {%html:<p>Raw markup</p>%} {%Without language%} {%other:Other language%} *) (** [Multi Line] [ A lot of spaces ] [Very looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong] *) (** {[ for i = 1 to 3 do Printf.printf "let x%d = %d\n" i i done ]} *) (** {[ print_newline (); List.iter (fun s -> Printf.printf "let ( %s ) = Pervasives.( %s )\n" s s) ["+"; "-"; "*"; "/"] ]} *) (** {[ #use "import.cinaps";; List.iter all_fields ~f:(fun (name, type_) -> printf 
"\nexternal get_%s : unit -> %s = \"get_%s\"" name type_ name) ]} *) (** {[ List.iter all_fields ~f:(fun (name, type_) -> printf "\nexternal get_%s : unit -> %s = \"get_%s\"" name type_ name) ]} *) (** {[ let x = 1 in (* fooooooo *) let y = 2 in (* foooooooo *) z ]} *) (** {[ let this = is_short ]} {[ does not parse: verbatim +/+/+ /+/+/ +/+//+/+/+/+/+/+/+/ +/+/+ /+/+/ +/+//+/+/+/+/+/+/+/ +/+/+ /+/+/ +/+//+/+/+/+/+/+/+/ +/+/+ /+/+/ +/+//+/+ ]} {[ [@@@ocamlformat "break-separators = after"] let fooooooooooooooooo = [ foooooooooooooooooooooooooooooooo ; foooooooooooooooooooooooooooooooo ; foooooooooooooooooooooooooooooooo ] ]} {[ let fooooooooooooooooo = [ foooooooooooooooooooooooooooooooo ; foooooooooooooooooooooooooooooooo ; foooooooooooooooooooooooooooooooo ] ]} *) (** This is a comment with code inside {[ (** This is a comment with code inside [ let code inside = f inside ] *) let code inside (* comment *) = f inside ]} Code block with metadata: {@ocaml[ code ]} {@ocaml kind=toplevel[ code ]} {@ocaml kind=toplevel env=e1[ (** This is a comment with code inside [ let code inside = f inside ] *) let code inside (* comment *) = f inside ]} *) (** {e foooooooo oooooooooo ooooooooo ooooooooo} {i fooooooooooooo oooooooo oooooooooo} {b fooooooooooooo oooooooooooo oooooo ooooooo} *) (** {e foooooooo oooooooooo ooooooooo ooooooooo} {{!some ref} fooooooooooooo oooooooo oooooooooo} {b fooooooooooooo oooooooooooo oooooo ooooooo} *) (** foooooooooooooooooooooooooooooooooooooooooooooooooo foooooooooooo {b eee + eee eee} *) (** foooooooooooooooooooooooooooooooooooooooooooooooooo foooooooooooooooo {b + eee + eee eee} *) val f : int (***) val k : int (**) (** {e foooooooo oooooooooo ooooooooo ooooooooo {i fooooooooooooo oooooooo oooooooooo {b fooooooooooooo oooooooooooo oooooo ooooooo}}} *) (** {e {i fooooooooooooo oooooooo oooooooooo {b fooooooooooooo oooooooooooo oooooo ooooooo}} foooooooo oooooooooo ooooooooo ooooooooo} *) (** foooooooooo fooooooooooo {e foooooooo oooooooooo 
ooooooooo ooooooooo {i fooooooooooooo oooooooo oooooooooo {b fooooooooooooo oooooooooooo oooooo ooooooo}} fooooooooooooo foooooooooo fooooo {i fooooooooooooo oooooooo oooooooooo {b fooooooooooooo oooooooooooo oooooo ooooooo}}} {e foooooooo oooooooooo ooooooooo ooooooooo {i fooooooooooooo oooooooo oooooooooo}} fooooooooooooo foooooooooooooo: - foo - {e foooooooo oooooooooo ooooooooo ooooooooo {i fooooooooooooo oooooooo oooooooooo}} - {e foooooooo oooooooooo ooooooooo ooooooooo} {i fooooooooooooo oooooooo oooooooooo} - foo *) (** Brackets must not be escaped in the first argument of some tags: *) (** @raise [Invalid_argument] if the argument is [None]. Sometimes [t.[x]]. *) (** @author [Abc] [def] \[hij\] *) (** @author {Abc} {def} \{hij\} *) (** @param [id] [def] \[hij\] *) (** @raise [exn] [def] \[hij\] *) (** @since [Abc] [def] \[hij\] *) (** @before [Abc] [def] \[hij\] *) (** @version [Abc] [def] \[hij\] *) (** @see <[Abc]> [def] \[hij\] *) (** @see '[Abc]' [def] \[hij\] *) (** @see "[Abc]" [def] \[hij\] *) (** \[abc\] *) (** *) (** *) (** [trim " "] is [""] *) (** [trms (c × (Σᵢ₌₁ⁿ cᵢ × Πⱼ₌₁ᵐᵢ Xᵢⱼ^pᵢⱼ))] is the sequence of terms [Xᵢⱼ] for each [i] and [j]. *) (** Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi lacinia odio sit amet lobortis fringilla. Mauris diam massa, vulputate sit amet lacus id, vestibulum bibendum lectus. Nullam tristique justo nisi, gravida dapibus mi pulvinar at. Suspendisse pellentesque odio quis ipsum tempor luctus. Cras ultrices, magna sit amet faucibus molestie, sapien dolor ullamcorper lorem, vel viverra tortor augue vel massa. Suspendisse nunc nisi, consequat et ante nec, efficitur dapibus ipsum. Aenean vitae pellentesque odio. Integer et ornare tellus, at tristique elit. Phasellus et nisi id neque ultrices vestibulum vitae non tortor. Mauris aliquet at risus sed rhoncus. Ut condimentum rhoncus orci, sit amet eleifend erat tempus quis. 
*) (** {[(* a b *)]} *) val a : fooooooooooooooooooooooooooo (** {[(* a b *)]} *) -> fooooooooooooooooooooooooo type x = { a : t (** {[(* a b *)]} *) ; b : [` A (** {[(* a b *)]} *) ] } type x = | A of a (** {[(* a b *)]} *) | B of b (** {[(* a b *)]} *) (** Set a different language name in the block metadata to not format as OCaml: {@sh[ echo "this""is""only""a""single"(echo word)(echo also) ]} *) (**a*)(**b*) (** Inline math: {m \infty} Inline math elements can wrap as well {m \infty \infty \infty \infty \infty \infty \infty \infty \infty \infty \infty \infty \infty \infty \infty \infty \infty} or {m \f\relax{x} = \int_{-\infty}^\infty \f\hat\xi\,e^{2 \pi i \xi x} \,d\xi}. Block math: {math \infty} {math \infty } {math \pi } {math \infty \pi \pi \pi } {math {m \f\relax{x} = \int_{-\infty}^\infty \f\hat\xi\,e^{2 \pi i \xi x} \,d\xi}} {math % \f is defined as #1f(#2) using the macro \f\relax{x} = \int_{-\infty}^\infty \f\hat\xi\,e^{2 \pi i \xi x} \,d\xi } *)
(** Manpages. See {!Cmdliner.Manpage}. *)
token.ml
type container = [ `Contract of Contract_repr.t | `Collected_commitments of Blinded_public_key_hash.t | `Delegate_balance of Signature.Public_key_hash.t | `Frozen_deposits of Signature.Public_key_hash.t | `Block_fees | `Frozen_bonds of Contract_repr.t * Bond_id_repr.t ] type infinite_source = [ `Invoice | `Bootstrap | `Initial_commitments | `Revelation_rewards | `Double_signing_evidence_rewards | `Endorsing_rewards | `Baking_rewards | `Baking_bonuses | `Minted | `Liquidity_baking_subsidies | `Tx_rollup_rejection_rewards ] type source = [infinite_source | container] type infinite_sink = [ `Storage_fees | `Double_signing_punishments | `Lost_endorsing_rewards of Signature.Public_key_hash.t * bool * bool | `Tx_rollup_rejection_punishments | `Burned ] type sink = [infinite_sink | container] let allocated ctxt stored = match stored with | `Contract contract -> Contract_storage.allocated ctxt contract >|=? fun allocated -> (ctxt, allocated) | `Collected_commitments bpkh -> Commitment_storage.exists ctxt bpkh >|= ok >|=? fun allocated -> (ctxt, allocated) | `Delegate_balance delegate -> let contract = Contract_repr.implicit_contract delegate in Contract_storage.allocated ctxt contract >|=? fun allocated -> (ctxt, allocated) | `Frozen_deposits delegate -> let contract = Contract_repr.implicit_contract delegate in Frozen_deposits_storage.allocated ctxt contract >|= fun allocated -> ok (ctxt, allocated) | `Block_fees -> return (ctxt, true) | `Frozen_bonds (contract, bond_id) -> Contract_storage.bond_allocated ctxt contract bond_id let balance ctxt stored = match stored with | `Contract contract -> Contract_storage.get_balance ctxt contract >|=? fun balance -> (ctxt, balance) | `Collected_commitments bpkh -> Commitment_storage.committed_amount ctxt bpkh >|=? fun balance -> (ctxt, balance) | `Delegate_balance delegate -> let contract = Contract_repr.implicit_contract delegate in Storage.Contract.Spendable_balance.get ctxt contract >|=? 
fun balance -> (ctxt, balance) | `Frozen_deposits delegate -> let contract = Contract_repr.implicit_contract delegate in Frozen_deposits_storage.find ctxt contract >|=? fun frozen_deposits -> let balance = match frozen_deposits with | None -> Tez_repr.zero | Some frozen_deposits -> frozen_deposits.current_amount in (ctxt, balance) | `Block_fees -> return (ctxt, Raw_context.get_collected_fees ctxt) | `Frozen_bonds (contract, bond_id) -> Contract_storage.find_bond ctxt contract bond_id >|=? fun (ctxt, balance_opt) -> (ctxt, Option.value ~default:Tez_repr.zero balance_opt) let credit ctxt dest amount origin = let open Receipt_repr in (match dest with | #infinite_sink as infinite_sink -> let sink = match infinite_sink with | `Storage_fees -> Storage_fees | `Double_signing_punishments -> Double_signing_punishments | `Lost_endorsing_rewards (d, p, r) -> Lost_endorsing_rewards (d, p, r) | `Tx_rollup_rejection_punishments -> Tx_rollup_rejection_punishments | `Burned -> Burned in return (ctxt, sink) | #container as container -> ( match container with | `Contract dest -> Contract_storage.credit_only_call_from_token ctxt dest amount >|=? fun ctxt -> (ctxt, Contract dest) | `Collected_commitments bpkh -> Commitment_storage.increase_commitment_only_call_from_token ctxt bpkh amount >|=? fun ctxt -> (ctxt, Commitments bpkh) | `Delegate_balance delegate -> let contract = Contract_repr.implicit_contract delegate in Contract_storage.increase_balance_only_call_from_token ctxt contract amount >|=? fun ctxt -> (ctxt, Contract contract) | `Frozen_deposits delegate as dest -> allocated ctxt dest >>=? fun (ctxt, allocated) -> (if not allocated then Frozen_deposits_storage.init ctxt delegate else return ctxt) >>=? fun ctxt -> Frozen_deposits_storage.credit_only_call_from_token ctxt delegate amount >|=? 
fun ctxt -> (ctxt, Deposits delegate) | `Block_fees -> Raw_context.credit_collected_fees_only_call_from_token ctxt amount >>?= fun ctxt -> return (ctxt, Block_fees) | `Frozen_bonds (contract, bond_id) -> Contract_storage.credit_bond_only_call_from_token ctxt contract bond_id amount >>=? fun ctxt -> return (ctxt, Frozen_bonds (contract, bond_id)))) >|=? fun (ctxt, balance) -> (ctxt, (balance, Credited amount, origin)) let spend ctxt src amount origin = let open Receipt_repr in (match src with | #infinite_source as infinite_source -> let src = match infinite_source with | `Bootstrap -> Bootstrap | `Invoice -> Invoice | `Initial_commitments -> Initial_commitments | `Minted -> Minted | `Liquidity_baking_subsidies -> Liquidity_baking_subsidies | `Revelation_rewards -> Nonce_revelation_rewards | `Double_signing_evidence_rewards -> Double_signing_evidence_rewards | `Endorsing_rewards -> Endorsing_rewards | `Baking_rewards -> Baking_rewards | `Baking_bonuses -> Baking_bonuses | `Tx_rollup_rejection_rewards -> Tx_rollup_rejection_rewards in return (ctxt, src) | #container as container -> ( match container with | `Contract src -> Contract_storage.spend_only_call_from_token ctxt src amount >|=? fun ctxt -> (ctxt, Contract src) | `Collected_commitments bpkh -> Commitment_storage.decrease_commitment_only_call_from_token ctxt bpkh amount >|=? fun ctxt -> (ctxt, Commitments bpkh) | `Delegate_balance delegate -> let contract = Contract_repr.implicit_contract delegate in Contract_storage.decrease_balance_only_call_from_token ctxt contract amount >|=? fun ctxt -> (ctxt, Contract contract) | `Frozen_deposits delegate -> Frozen_deposits_storage.spend_only_call_from_token ctxt delegate amount >|=? fun ctxt -> (ctxt, Deposits delegate) | `Block_fees -> Raw_context.spend_collected_fees_only_call_from_token ctxt amount >>?= fun ctxt -> return (ctxt, Block_fees) | `Frozen_bonds (contract, bond_id) -> Contract_storage.spend_bond_only_call_from_token ctxt contract bond_id amount >>=? 
fun ctxt -> return (ctxt, Frozen_bonds (contract, bond_id)))) >|=? fun (ctxt, balance) -> (ctxt, (balance, Debited amount, origin)) let transfer_n ?(origin = Receipt_repr.Block_application) ctxt src dest = let sources = List.filter (fun (_, am) -> Tez_repr.(am <> zero)) src in match sources with | [] -> (* Avoid accessing context data when there is nothing to transfer. *) return (ctxt, []) | _ :: _ -> (* Withdraw from sources. *) List.fold_left_es (fun (ctxt, total, debit_logs) (source, amount) -> spend ctxt source amount origin >>=? fun (ctxt, debit_log) -> Tez_repr.(amount +? total) >>?= fun total -> return (ctxt, total, debit_log :: debit_logs)) (ctxt, Tez_repr.zero, []) sources >>=? fun (ctxt, amount, debit_logs) -> credit ctxt dest amount origin >>=? fun (ctxt, credit_log) -> (* Deallocate implicit contracts with no stake. This must be done after spending and crediting. If done in between then a transfer of all the balance from (`Contract c) to (`Frozen_bonds (c,_)) would leave the contract c unallocated. *) List.fold_left_es (fun ctxt (source, _amount) -> match source with | `Contract contract | `Frozen_bonds (contract, _) -> Contract_storage.ensure_deallocated_if_empty ctxt contract | #source -> return ctxt) ctxt sources >|=? fun ctxt -> (* Make sure the order of balance updates is : debit logs in the order of of the parameter [src], and then the credit log. *) let balance_updates = List.rev (credit_log :: debit_logs) in (ctxt, balance_updates) let transfer ?(origin = Receipt_repr.Block_application) ctxt src dest amount = transfer_n ~origin ctxt [(src, amount)] dest
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2020-2021 Nomadic Labs <contact@nomadic-labs.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
hkdf_test.ml
(* Known-answer tests for the HKDF implementations (Hacl.* and EverCrypt).
   The single vector below appears to be RFC 5869, Appendix A, Test Case 1
   (SHA-256) — TODO confirm against the RFC. *)
open SharedDefs
open Test_utils

(* One HKDF test vector: algorithm, inputs, and the expected PRK
   (output of extract) and OKM (output of expand). *)
type 'a hkdf_test =
  { alg: HashDefs.alg
  ; name: string
  ; ikm: 'a            (* input keying material *)
  ; salt: 'a
  ; info: 'a           (* context/application-specific info for expand *)
  ; expected_prk: 'a   (* pseudorandom key expected from extract *)
  ; expected_okm: 'a   (* output keying material expected from expand *)
  }

let tests = [
  { alg = SHA2_256;
    name = "Test 1";
    ikm = Bytes.of_string "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b";
    salt = Bytes.of_string "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c";
    info = Bytes.of_string "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9";
    expected_prk = Bytes.of_string "\x07\x77\x09\x36\x2c\x2e\x32\xdf\x0d\xdc\x3f\x0d\xc4\x7b\xba\x63\x90\xb6\xc7\x3b\xb5\x0f\x9c\x31\x22\xec\x84\x4a\xd7\xc2\xb3\xe5";
    expected_okm = Bytes.of_string "\x3c\xb2\x5f\x25\xfa\xac\xd5\x7a\x90\x43\x4f\x64\xd0\x36\x2f\x2a\x2d\x2d\x0a\x90\xcf\x1a\x5a\x4c\x5d\xb0\x2d\x56\xec\xc4\xc5\xbf\x34\x00\x72\x08\xd5\xb8\x87\x18\x58\x65"
  }
]

(* Instantiate the vector checks for one HKDF implementation [M].
   Vectors whose [alg] does not match the module's algorithm are still
   executed (as a call-level sanity check) but their output is not
   compared — see the TODO below. *)
module MakeTests (M: SharedDefs.HKDF) = struct
  (* In-place (caller-allocated buffers) variant; only run when the
     vector's algorithm matches [alg]. *)
  let test_noalloc (v: Bytes.t hkdf_test) name alg =
    if v.alg = alg then
      let test_result = test_result (name ^ " (noalloc) " ^ v.name) in
      let prk = Test_utils.init_bytes (Bytes.length v.expected_prk) in
      let okm = Test_utils.init_bytes (Bytes.length v.expected_okm) in
      M.Noalloc.extract ~salt:v.salt ~ikm:v.ikm ~prk;
      if not (Bytes.equal prk v.expected_prk) then
        test_result Failure "PRK mismatch";
      M.Noalloc.expand ~prk ~info:v.info ~okm;
      if not (Bytes.equal okm v.expected_okm) then
        test_result Failure "OKM mismatch";
      if Bytes.equal prk v.expected_prk && Bytes.equal okm v.expected_okm then
        test_result Success ""

  (* Allocating variant; always calls extract/expand, but only compares
     results when the vector's algorithm matches [alg]. *)
  let test (v: Bytes.t hkdf_test) name alg =
    let test_result = test_result (name ^ " " ^ v.name) in
    let prk = M.extract ~salt:v.salt ~ikm:v.ikm in
    let okm = M.expand ~prk ~info:v.info ~size:(Bytes.length v.expected_okm) in
    if alg = v.alg then begin
      if not (Bytes.equal prk v.expected_prk) then
        test_result Failure "PRK mismatch";
      if not (Bytes.equal okm v.expected_okm) then
        test_result Failure "OKM mismatch";
      if Bytes.equal prk v.expected_prk && Bytes.equal okm v.expected_okm then
        test_result Success ""
    end
    else
      (* algorithm mismatch: the calls themselves succeeded, which is all
         we check here *)
      test_result Success "function calls"

  let run_tests name alg =
    List.iter (fun v -> test v name alg) tests;
    List.iter (fun v -> test_noalloc v name alg) tests
end

(* Exercise the algorithm-agile EverCrypt.HKDF API against the same
   vector, guarded by the runtime algorithm-support check. *)
let test_agile (v: Bytes.t hkdf_test) =
  let test_result = test_result ("Agile EverCrypt.HKDF with " ^ v.name) in
  let prk = Test_utils.init_bytes (Bytes.length v.expected_prk) in
  let okm = Test_utils.init_bytes (Bytes.length v.expected_okm) in
  if EverCrypt.HMAC.is_supported_alg ~alg:v.alg then begin
    EverCrypt.HKDF.Noalloc.extract ~alg:v.alg ~salt:v.salt ~ikm:v.ikm ~prk;
    if not (Bytes.equal prk v.expected_prk) then
      test_result Failure "PRK mismatch";
    EverCrypt.HKDF.Noalloc.expand ~alg:v.alg ~prk ~info:v.info ~okm;
    if not (Bytes.equal okm v.expected_okm) then
      test_result Failure "OKM mismatch";
    if Bytes.equal prk v.expected_prk && Bytes.equal okm v.expected_okm then
      test_result Success ""
  end
  else test_result Failure "hash algorithm reported as not supported"

(* TODO: find test vectors for the other hash functions.
   Only HKDF_SHA2_256 is currently covered by a unit test.  As a sanity
   check, function calls for all the other versions are exercised, but
   their output is not checked. *)
let _ =
  List.iter test_agile tests;
  let module Tests = MakeTests (Hacl.HKDF_SHA2_256) in
  Tests.run_tests "Hacl.HKDF_SHA2_256" SHA2_256;
  let module Tests = MakeTests (Hacl.HKDF_SHA2_512) in
  Tests.run_tests "Hacl.HKDF_SHA2_512" SHA2_512;
  let module Tests = MakeTests (Hacl.HKDF_BLAKE2b) in
  Tests.run_tests "Hacl.HKDF_BLAKE2b" BLAKE2b;
  let module Tests = MakeTests (Hacl.HKDF_BLAKE2s) in
  Tests.run_tests "Hacl.HKDF_BLAKE2s" BLAKE2s
zar.ml
(** Here we test the conversion functions z_of_int and int_of_z and check that samplers can build and run successfully. *) open Alcotest open QCheck_alcotest open Zar__Core open Zar__Internal open Zar let rec string_of_list (sep : string) (to_string : 'a -> string) = function | [] -> "" | x :: xs -> to_string x ^ sep ^ string_of_list sep to_string xs (** Check that samplers can build and run. *) let () = Zar.seed (); print_endline @@ string_of_list " " string_of_bool @@ Zar.take 10 @@ Zar.bits (); (* Coin. *) let coin = Zar.coin 2 3 in print_endline @@ string_of_bool @@ first coin; print_endline @@ string_of_bool @@ first (rest coin); print_endline @@ string_of_bool @@ first (rest (rest coin)); print_endline @@ string_of_bool @@ first (rest (rest (rest coin))); print_endline @@ string_of_list " " string_of_bool @@ Zar.take 10 coin; (* Die. *) let die = Zar.die 100 in print_endline @@ string_of_int @@ first die; print_endline @@ string_of_list " " string_of_int @@ Zar.take 10 die; (* Findist. *) let findist = Zar.findist [1; 2; 3; 4; 5] in print_endline @@ string_of_int @@ first findist; print_endline @@ string_of_list " " string_of_int @@ Zar.take 10 findist (** Number of samples per QCheck test. *) let gen_count = 10000 (** Lower and upper bounds on randomly generated ints. Divide by 2 to prevent overflow in 'double_z_of_int' and 'z_of_int_plus' tests. 
*) let bound = Int.max_int / 2 let tests = ref [] let add_test nm t = tests := !tests @ [Alcotest.test_case nm `Quick t] let add_qcheck t = tests := !tests @ [to_alcotest t] let rec string_of_positive : positive -> string = function | XH -> "XH" | XO p -> "XO " ^ string_of_positive p | XI p -> "XI " ^ string_of_positive p let string_of_z : z -> string = function | Z0 -> "Z0" | Zpos p -> "Zpos " ^ string_of_positive p | Zneg p -> "Zneg " ^ string_of_positive p let z : z testable = let pp_z ppf x = Fmt.pf ppf "%s" (string_of_z x) in testable pp_z ( = ) let pos_gen = let open QCheck.Gen in sized_size (int_bound @@ Sys.word_size - 3) @@ fix (fun self n -> match n with | 0 -> return XH | _ -> oneof [map (fun x -> XO x) (self (n - 1)); map (fun x -> XI x) (self (n - 1))]) let z_gen = let open QCheck.Gen in frequency [1, return Z0; 100, map (fun x -> Zpos x) pos_gen; 100, map (fun x -> Zneg x) pos_gen] let arbitrary_z = let open QCheck.Iter in let shrink_positive = function | XH -> QCheck.Iter.empty | XO p -> QCheck.Iter.return p | XI p -> QCheck.Iter.return p in let shrink_z = function | Z0 -> empty | Zpos p -> let* x = shrink_positive p in return (Zpos x) | Zneg p -> let* x = shrink_positive p in return (Zneg x) in QCheck.make z_gen ~print:string_of_z ~shrink:shrink_z (** A few positives for testing. 
*) let one = XH let two = XO one let three = XI one let four = XO two let five = XI two let () = add_test "int_of_z" @@ fun _ -> (check int) "" 0 (int_of_z Z0); (check int) "" 1 (int_of_z (Zpos one)); (check int) "" 2 (int_of_z (Zpos two)); (check int) "" 3 (int_of_z (Zpos three)); (check int) "" 4 (int_of_z (Zpos four)); (check int) "" 5 (int_of_z (Zpos five)); (check int) "" (-1) (int_of_z (Zneg one)); (check int) "" (-2) (int_of_z (Zneg two)); (check int) "" (-3) (int_of_z (Zneg three)); (check int) "" (-4) (int_of_z (Zneg four)); (check int) "" (-5) (int_of_z (Zneg five)) let () = add_test "z_of_int" @@ fun _ -> (check z) "" Z0 (z_of_int 0); (check z) "" (Zpos one) (z_of_int 1); (check z) "" (Zpos two) (z_of_int 2); (check z) "" (Zpos three) (z_of_int 3); (check z) "" (Zpos four) (z_of_int 4); (check z) "" (Zpos five) (z_of_int 5); (check z) "" (Zneg one) (z_of_int (-1)); (check z) "" (Zneg two) (z_of_int (-2)); (check z) "" (Zneg three) (z_of_int (-3)); (check z) "" (Zneg four) (z_of_int (-4)); (check z) "" (Zneg five) (z_of_int (-5)) (** z_of_int ∘ int_of_z = id. *) let () = add_qcheck @@ QCheck.Test.make ~name:"z_of_int_int_of_z" ~count:gen_count arbitrary_z (fun n -> z_of_int (int_of_z n) = n) (** int_of_z ∘ z_of_int = id. *) let () = add_qcheck @@ QCheck.(Test.make ~name:"int_of_z_z_of_int" ~count:gen_count (int_range (-bound) bound) (fun n -> int_of_z (z_of_int n) = n)) (** ∀ n m, z_of_int n + z_of_int m = z_of_int (n + m). *) let () = add_qcheck @@ QCheck.(Test.make ~name:"z_of_int_plus" ~count:gen_count (pair (int_range (-bound) bound) (int_range (-bound) bound)) (fun (n, m) -> Z.add (z_of_int n) (z_of_int m) = z_of_int (n + m))) (** ∀ n m, int_of_z n + int_of_z m = int_of_z (n + m). *) let () = add_qcheck @@ QCheck.(Test.make ~name:"int_of_z_plus" ~count:gen_count (pair arbitrary_z arbitrary_z) (fun (n, m) -> int_of_z n + int_of_z m = int_of_z (Z.add n m))) (** ∀ n, int_of_z (double n) = 2 * int_of_z n. 
*) let () = add_qcheck @@ QCheck.(Test.make ~name:"int_of_z_double" ~count:gen_count arbitrary_z (fun n -> int_of_z (Z.double n) = 2 * int_of_z n)) (** ∀ n, double (z_of_int n) = z_of_int (2 * n). *) let () = add_qcheck @@ QCheck.(Test.make ~name:"double_z_of_int" ~count:gen_count (int_range (-bound) bound) (fun n -> Z.double (z_of_int n) = z_of_int (2 * n))) (** ∀ n m, ltb n m ⇔ int_of_z n < int_of_z m. *) let () = add_qcheck @@ QCheck.(Test.make ~name:"lt_int_of_z" ~count:gen_count (pair arbitrary_z arbitrary_z) (fun (n, m) -> Z.ltb n m == (int_of_z n < int_of_z m))) (** ∀ n m, n < m ⇔ ltb (z_of_int n) (z_of_int m). *) let () = add_qcheck @@ QCheck.(Test.make ~name:"z_of_int_lt" ~count:gen_count (pair (int_range (-bound) (bound)) (int_range (-bound) (bound))) (fun (n, m) -> n < m == Z.ltb (z_of_int n) (z_of_int m))) (* let () = *) (* let zs = QCheck.Gen.generate ~n:20 z_gen in *) (* print_endline @@ string_of_list "\n" string_of_z zs *) (** Run unit tests. *) let () = Alcotest.run "zar" [ "zar", !tests ]
(** Here we test the conversion functions z_of_int and int_of_z and check that samplers can build and run successfully. *)
test_arrays.mli
(** Test-suite functor: applied to a [Driver] (the backend under test),
    it yields a module exposing the array unit tests.
    NOTE(review): [Testable.Driver] is declared elsewhere — the exact
    driver contract is not visible from this file. *)
module Make : functor (Driver : Testable.Driver) -> sig
  (** The array test suite, runnable via [Alcotest.run]. *)
  val unittest: unit Alcotest.test
end
test.ml
(* Kept as reference material for this fixture: a pretty-printable string
   map that was experimented with at the [%interact] prompts below. *)
(* module SMap = struct include Map.Make (struct type t = string let
   compare = compare end)

   let pp pp_v fmt map = (* Format.fprintf fmt "{@[<2>@,%a@]@,}" *) (*
   ???? *) Format.fprintf fmt "{@[<-23>@,%a@]@,}" (Format.pp_print_list
   ~pp_sep:(fun fmt () -> Format.fprintf fmt ",@,") (fun fmt (k, v) -> (*
   Format.fprintf fmt "%s -> %s" k (sprintf_ "%a" pp_v v))) *)
   Format.fprintf fmt "%s: %a" k pp_v v)) (bindings map)

   let update_ k f m = update k (function None -> failwith "invalid" |
   Some v -> Some (f v)) m end *)

(* Fixture module with a nested submodule, so the [%interact] sessions
   below can inspect values reachable through module paths. *)
module A = struct
  let one = 1

  module C = struct
    let nested = 1
  end
end

(* [e] is deliberately bound and unused: it exercises how unused locals
   show up in scope at the interaction points. *)
let a () =
  let e = 1 in
  2

(* Each [%interact] is a ppx extension point that drops into an
   interactive session with the local scope at that position
   ([d], [b], [f], [a], ...). *)
let () =
  let _ =
    let d = 3 in
    [%interact]
  in
  let b = 2 in
  let f a = [%interact] in
  [%interact];
  f 2

(* Interaction point inside a module body. *)
module B = struct
  let inside = 2

  let () = [%interact]
end
(* module SMap = struct include Map.Make (struct type t = string let compare = compare end) let pp pp_v fmt map = (* Format.fprintf fmt "{@[<2>@,%a@]@,}" *) (* ???? *) Format.fprintf fmt "{@[<-23>@,%a@]@,}"
dune
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;                                                                         ;
;                                   OCamlFormat                           ;
;                                                                         ;
;  Copyright (c) Facebook, Inc. and its affiliates.                       ;
;                                                                         ;
;  This source code is licensed under the MIT license found in            ;
;  the LICENSE file in the root directory of this source tree.            ;
;                                                                         ;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; The ocamlformat-rpc server binary, installed by the ocamlformat
; package.  Built with bisect_ppx instrumentation available for
; coverage runs.
(executable
 (name main)
 (public_name ocamlformat-rpc)
 (package ocamlformat)
 (flags (:standard -open Ocamlformat_stdlib))
 (instrumentation (backend bisect_ppx))
 (libraries ocamlformat.rpc))

; Generate the man page by capturing the binary's groff-formatted
; --help output.
(rule
 (with-stdout-to ocamlformat-rpc.1
  (run ./main.exe --help=groff)))

; Install the generated man page in the man section.
(install
 (section man)
 (files ocamlformat-rpc.1)
 (package ocamlformat))
eliom_extension_template.server.ml
(*****************************************************************************)
(*****************************************************************************)
(* This is an example of extension for Ocsigen.  Take this as a template
   for writing your own Eliom-based extensions to the Web server. *)
(*****************************************************************************)
(*****************************************************************************)

(* Register the extension at module-initialization time.  For every
   request it answers [Ext_found]; the inner thunk then builds a constant
   plain-text page via [Ocsigen_senders.Text_content].
   NOTE(review): the server parameters [sp] are ignored here — this is
   template code meant to be specialized. *)
let _ =
  Eliom_extension.register_eliom_extension (fun sp ->
      Lwt.return
        (Ocsigen_extensions.Ext_found
           (fun () ->
             let content = "Eliom Extension template page" in
             Ocsigen_senders.Text_content.result_of_content
               (content, "text/plain"))))
(* Ocsigen * http://www.ocsigen.org * Module extensiontemplate.ml * Copyright (C) 2007 Vincent Balat * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, with linking exception; * either version 2.1 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *)
grafana.ml
type config = { url : Uri.t; api_token : string option; data_source : string; timeout : float; } let config_of_json json = { url = JSON.(json |-> "url" |> as_string |> Uri.of_string); api_token = JSON.(json |-> "api_token" |> as_string_opt); data_source = JSON.(json |-> "data_source" |> as_string); timeout = JSON.(json |-> "timeout" |> as_float_opt |> Option.value ~default:20.); } type duration = | Seconds of int | Minutes of int | Hours of int | Days of int | Weeks of int | Month of int | Years of int let string_of_duration = let f value unit = string_of_int value ^ unit in function | Seconds x -> f x "s" | Minutes x -> f x "m" | Hours x -> f x "h" | Days x -> f x "d" | Weeks x -> f x "w" | Month x -> f x "M" | Years x -> f x "y" type alias = string type yaxis = {format : string; label : string option} type graph = { title : string; description : string; queries : (InfluxDB.select * alias option) list; interval : duration option; yaxis_1 : yaxis option; yaxis_2 : yaxis option; } type panel = Row of string | Graph of graph (* We use the [uid] and not the [id] because the [uid] can be stable between Grafana installs. 
*) type dashboard = { uid : string; title : string; description : string; panels : panel list; } let encode_target ((query : InfluxDB.select), alias) : JSON.u = let label = Option.map (fun a -> ("alias", `String a)) alias |> Option.to_list in `O (label @ [ ("query", `String (InfluxDB.show_select ~grafana:true query)); ("rawQuery", `Bool true); ("resultFormat", `String "time_series"); ]) let encode_yaxis = function | None -> `O [("show", `Bool false)] | Some {format; label} -> `O (("format", `String format) :: (match label with | None -> [] | Some label -> [("label", `String label)]) @ [("show", `Bool true)]) let encode_panel config y panel : JSON.u = match panel with | Row title -> `O [ ("type", `String "row"); ("title", `String title); ( "gridPos", `O [ ("h", `Float 1.); ("w", `Float 24.); ("x", `Float 0.); ( "y", `Float (let value = float !y in y := !y + 1 ; value) ); ] ); ] | Graph {title; description; queries; interval; yaxis_1; yaxis_2} -> let interval = Option.map (fun i -> ("interval", `String (string_of_duration i))) interval |> Option.to_list in let unit = match yaxis_1 with None -> "none" | Some x -> x.format in `O (interval @ [ ("type", `String "timeseries"); ("datasource", `String config.data_source); ("title", `String title); ("description", `String description); ( "gridPos", `O [ ("h", `Float 8.); ("w", `Float 24.); ("x", `Float 0.); ( "y", `Float (let value = float !y in y := !y + 8 ; value) ); ] ); ("targets", `A (List.map encode_target queries)); ("yaxes", `A [encode_yaxis yaxis_1; encode_yaxis yaxis_2]); ( "fieldConfig", `O [ ( "defaults", `O [ ( "custom", `O [ ("drawStyle", `String "line"); ("lineInterpolation", `String "linear"); ("showPoints", `String "always"); ("pointSize", `Float 5.); ("spanNulls", `Bool true); ("lineWidth", `Float 1.); ("fillOpacity", `Float 10.); ("axisSoftMin", `Float 0.); ] ); ("unit", `String unit); ] ); ] ); ]) let encode_dashboard config {uid; title; description; panels} : JSON.u = `O [ ("uid", `String uid); ("title", 
`String title); ("description", `String description); ("panels", `A (List.map (encode_panel config (ref 0)) panels)); ] let make_url {url; _} path = let path = let base_path = Uri.path url in if base_path <> "" && base_path.[String.length base_path - 1] = '/' then base_path ^ path else base_path ^ "/" ^ path in Uri.with_path url path let with_timeout {timeout; _} p = let timeout = let* () = Lwt_unix.sleep timeout in failwith "timeout" in Lwt.pick [p; timeout] let uid_rex = rex "[a-zA-Z0-9._-]{1,128}" type http_request = { uri : Uri.t; meth : Cohttp.Code.meth; headers : Cohttp.Header.t; body : Cohttp_lwt.Body.t option; } let http_call request config = with_timeout config @@ Cohttp_lwt_unix.Client.call ~headers:request.headers ?body:request.body request.meth request.uri let string_of_http_request request = let* body = Option.map (fun body -> Lwt.map (fun s -> ", Body: " ^ s) @@ Cohttp_lwt.Body.to_string body) request.body |> Option.value ~default:(Lwt.return "") in return @@ Format.sprintf "Uri: %s, Method: %s, Headers: [%s]%s" (Uri.to_string request.uri) (Cohttp.Code.string_of_method request.meth) (Cohttp.Header.to_string request.headers) body let handle_http_error resp_status resp_body request = let* body = Cohttp_lwt.Body.to_string resp_body in let* req = string_of_http_request request in failwith @@ sf "Grafana responded with %s - %s for request (%s) " (Cohttp.Code.string_of_status resp_status) body req let update_dashboard config dashboard = if dashboard.uid =~! uid_rex then invalid_arg (sf "Grafana.update_dashboard: invalid UID: %s (must match: %s)" dashboard.uid (show_rex uid_rex)) ; let authorization = Option.map (fun t -> ("Authorization", "Bearer " ^ t)) config.api_token |> Option.to_list in (* Delete so that we don't care about versions. 
*) let* () = let delete_request = { uri = make_url config ("dashboards/uid/" ^ dashboard.uid); meth = `DELETE; headers = Cohttp.Header.of_list authorization; body = None; } in let* (response, body) = http_call delete_request config in match response.status with | #Cohttp.Code.success_status | `Not_found -> Cohttp_lwt.Body.drain_body body | status -> handle_http_error status body delete_request in (* (Re-)create dashboard. *) let body = `O [("dashboard", encode_dashboard config dashboard)] |> JSON.encode_u in let create_request = { uri = make_url config "dashboards/db"; meth = `POST; headers = Cohttp.Header.of_list @@ ("Content-Type", "application/json") :: authorization; body = Option.some @@ Cohttp_lwt.Body.of_string body; } in let* (response, body) = http_call create_request config in match response.status with | #Cohttp.Code.success_status -> Cohttp_lwt.Body.drain_body body | status -> handle_http_error status body create_request let where_clause_of_tag (tag_name, tag_label) = InfluxDB.Tag (tag_name, EQ, tag_label) let where_clause_of_tags hd tail = List.fold_left (fun clause tag -> InfluxDB.And (clause, where_clause_of_tag tag)) (where_clause_of_tag hd) tail let simple_query ?(tags = []) ~measurement ~field ~test () = let where_clause = InfluxDB.And (Grafana_time_filter, where_clause_of_tags ("test", test) tags) in InfluxDB.select [Function (MEAN, Field field)] ~from:(Measurement measurement) ~where:where_clause ~group_by:(Time {interval = Grafana_interval; tag = None; fill = None}) let simple_graph ?title ?(description = "") ?(yaxis_format = "s") ?tags ?interval ~measurement ~field ~test () = let title = Option.value title ~default:measurement in Graph { title; description; queries = [(simple_query ?tags ~measurement ~field ~test (), None)]; interval; yaxis_1 = Some {format = yaxis_format; label = Some field}; yaxis_2 = None; } let graphs_per_tags ?title ?(description = "") ?(yaxis_format = "s") ?interval ~measurement ~field ~test ~tags () = let title = 
Option.value title ~default:measurement in let queries = List.map (fun (key, value) -> ( simple_query ~tags:[(key, value)] ~measurement ~field ~test (), Some (key ^ "=" ^ value) )) tags in Graph { title; description; queries; interval; yaxis_1 = Some {format = yaxis_format; label = Some field}; yaxis_2 = None; }
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2021 Nomadic Labs <contact@nomadic-labs.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
issue1116.ml
(* Regression fixture: an integer binding followed by a string literal
   that happens to spell the previous binding's name. *)
let some_int = 5

let x = "some_int"
stypes.mli
(* Recording and dumping (partial) type information *)
(* Clflags.save_types must be true *)

open Typedtree;;

(* A single recorded annotation: either a typed-tree fragment
   (pattern, expression, class expression, module expression) or a
   located event (a call, or an identifier occurrence). *)
type annotation =
  | Ti_pat : 'k pattern_category * 'k general_pattern -> annotation
  | Ti_expr of expression
  | Ti_class of class_expr
  | Ti_mod of module_expr
  | An_call of Location.t * Annot.call
  | An_ident of Location.t * string * Annot.ident
;;

(* Record one annotation for later dumping. *)
val record : annotation -> unit;;

(* Record the location of a toplevel phrase.
   NOTE(review): exact phrase semantics not visible here — confirm in stypes.ml. *)
val record_phrase : Location.t -> unit;;

(* Dump recorded information; the optional string is presumably a target
   file name (None meaning a default destination) — TODO confirm. *)
val dump : string option -> unit;;

(* The source location covered by an annotation. *)
val get_location : annotation -> Location.t;;

(* All annotations recorded so far. *)
val get_info : unit -> annotation list;;
(**************************************************************************) (* *) (* OCaml *) (* *) (* Damien Doligez, projet Moscova, INRIA Rocquencourt *) (* *) (* Copyright 2003 Institut National de Recherche en Informatique et *) (* en Automatique. *) (* *) (* All rights reserved. This file is distributed under the terms of *) (* the GNU Lesser General Public License version 2.1, with the *) (* special exception on linking described in the file LICENSE. *) (* *) (**************************************************************************)
test_deriving_rpc.ml
(* All the ppx tests *) (* Check that t -/-> of_rpc but t -> t_of_rpc *) type t = int [@@deriving rpc] let _ = t_of_rpc let string_of_err = function | `Msg x -> x let check_marshal_unmarshal : 'a * Rpc.t * ('a -> Rpc.t) * (Rpc.t -> 'a) -> unit = fun (x, r, marshal, unmarshal) -> let r' = marshal x in let x' = unmarshal r in Alcotest.check (Testable.from_rpc_of_t marshal) "same after marshal->unmarshal" x x'; Alcotest.check Testable.rpc "marshalled stuff is same as specified rpc" r r' let check_unmarshal_error : (Rpc.t -> 'a) -> Rpc.t -> unit = fun unmarshal t -> let u = try Some (unmarshal t) with | _e -> None in match u with | Some _ -> Alcotest.fail "Expecting an error when unmarshalling" | None -> () let check_unmarshal_ok : 'a Alcotest.testable -> (Rpc.t -> 'a) -> 'a -> Rpc.t -> unit = fun testable unmarshal x r -> let x' = unmarshal r in Alcotest.check testable "unmarshaller returned expected value" x x' type test_int = int [@@deriving rpc] let test_int () = check_marshal_unmarshal (1, Rpc.Int 1L, rpc_of_test_int, test_int_of_rpc) let test_int_from_string () = check_unmarshal_ok Alcotest.int test_int_of_rpc 1 (Rpc.String "1") let test_bad_int () = check_unmarshal_error test_int_of_rpc Rpc.Null let test_bad_int_string () = check_unmarshal_error test_int_of_rpc (Rpc.String "tree") type test_int32 = int32 [@@deriving rpc] let test_int32 () = check_marshal_unmarshal (1l, Rpc.Int 1L, rpc_of_test_int32, test_int32_of_rpc) let test_int32_from_string () = check_unmarshal_ok Alcotest.int32 test_int32_of_rpc 1l (Rpc.String "1") let test_bad_int32 () = check_unmarshal_error test_int32_of_rpc (Rpc.Float 1.0) let test_bad_int32_string () = check_unmarshal_error test_int32_of_rpc (Rpc.String "moo") type test_int64 = int64 [@@deriving rpc] let test_int64 () = check_marshal_unmarshal (1L, Rpc.Int 1L, rpc_of_test_int64, test_int64_of_rpc) let test_int64_from_string () = check_unmarshal_ok Alcotest.int64 test_int64_of_rpc 1L (Rpc.String "1") let test_bad_int64 () = 
check_unmarshal_error test_int64_of_rpc (Rpc.Float 1.0) let test_bad_int64_string () = check_unmarshal_error test_int64_of_rpc (Rpc.String "hello") type test_unit = unit [@@deriving rpc] let test_unit () = check_marshal_unmarshal ((), Rpc.Null, rpc_of_test_unit, test_unit_of_rpc) let test_bad_unit () = check_unmarshal_error test_unit_of_rpc (Rpc.Int 1L) type test_string = string [@@deriving rpc] let test_string () = check_marshal_unmarshal ("test string", Rpc.String "test string", rpc_of_test_string, test_string_of_rpc) let test_bad_string () = check_unmarshal_error test_string_of_rpc (Rpc.Int 1L) type test_float = float [@@deriving rpc] let check_unmarshal_float_ok = check_unmarshal_ok Testable.float test_float_of_rpc let test_float () = check_marshal_unmarshal (2.0, Rpc.Float 2.0, rpc_of_test_float, test_float_of_rpc) let test_float_from_int () = check_unmarshal_float_ok 1.0 (Rpc.Int 1L) let test_float_from_int32 () = check_unmarshal_float_ok 1.0 (Rpc.Int32 1l) let test_float_from_string () = check_unmarshal_float_ok 1.0 (Rpc.String "1.0") let test_bad_float () = check_unmarshal_error test_float_of_rpc (Rpc.Enum []) let test_bad_float_string () = check_unmarshal_error test_float_of_rpc (Rpc.String "xxy") type test_bool = bool [@@deriving rpc] let test_bool () = check_marshal_unmarshal (true, Rpc.Bool true, rpc_of_test_bool, test_bool_of_rpc) let test_bad_bool () = check_unmarshal_error test_bool_of_rpc (Rpc.String "true") type test_char = char [@@deriving rpc] let test_char () = check_marshal_unmarshal ('x', Rpc.Int (Char.code 'x' |> Int64.of_int), rpc_of_test_char, test_char_of_rpc) let test_bad_char () = check_unmarshal_error test_char_of_rpc (Rpc.String "x") type test_int_list = int list [@@deriving rpc] let test_int_list () = check_marshal_unmarshal ( [ 1; 2; 3; 4 ] , Rpc.Enum [ Rpc.Int 1L; Rpc.Int 2L; Rpc.Int 3L; Rpc.Int 4L ] , rpc_of_test_int_list , test_int_list_of_rpc ) type test_dict = (string * int) list [@@deriving rpc] let test_dict () = 
check_marshal_unmarshal ( [ "foo", 1; "bar", 2; "baz", 3 ] , Rpc.Dict [ "foo", Rpc.Int 1L; "bar", Rpc.Int 2L; "baz", Rpc.Int 3L ] , rpc_of_test_dict , test_dict_of_rpc ) type key = string [@@deriving rpc] type test_dict_key = (key * int) list [@@deriving rpc] let test_dict_key () = check_marshal_unmarshal ( [ "foo", 1; "bar", 2; "baz", 3 ] , Rpc.Dict [ "foo", Rpc.Int 1L; "bar", Rpc.Int 2L; "baz", Rpc.Int 3L ] , rpc_of_test_dict_key , test_dict_key_of_rpc ) type test_int_array = int array [@@deriving rpc] let test_int_array () = check_marshal_unmarshal ( [| 1; 2; 3; 4 |] , Rpc.Enum [ Rpc.Int 1L; Rpc.Int 2L; Rpc.Int 3L; Rpc.Int 4L ] , rpc_of_test_int_array , test_int_array_of_rpc ) type test_tuple2 = int * string [@@deriving rpc] let test_tuple2 () = check_marshal_unmarshal ( (3, "hello") , Rpc.Enum [ Rpc.Int 3L; Rpc.String "hello" ] , rpc_of_test_tuple2 , test_tuple2_of_rpc ) type test_tuple3 = int * string * char [@@deriving rpc] let test_tuple3 () = check_marshal_unmarshal ( (3, "hi", 'c') , Rpc.Enum [ Rpc.Int 3L; Rpc.String "hi"; Rpc.Int (Char.code 'c' |> Int64.of_int) ] , rpc_of_test_tuple3 , test_tuple3_of_rpc ) type test_option = int option [@@deriving rpc] let test_option () = check_marshal_unmarshal (Some 1, Rpc.Enum [ Rpc.Int 1L ], rpc_of_test_option, test_option_of_rpc) let test_option_none () = check_marshal_unmarshal (None, Rpc.Enum [], rpc_of_test_option, test_option_of_rpc) let test_bad_option () = check_unmarshal_error test_option_of_rpc (Rpc.Int 5L) type test_constr = test_int [@@deriving rpc] let test_constr () = check_marshal_unmarshal (1, Rpc.Int 1L, rpc_of_test_constr, test_constr_of_rpc) type test_variant = | VNone | VOne of int | VTwo of (int * int) [@@deriving rpc] let test_variant () = check_marshal_unmarshal (VNone, Rpc.String "VNone", rpc_of_test_variant, test_variant_of_rpc) let test_variant1 () = check_marshal_unmarshal ( VOne 1 , Rpc.Enum [ Rpc.String "VOne"; Rpc.Int 1L ] , rpc_of_test_variant , test_variant_of_rpc ) let test_variant2 () 
= check_marshal_unmarshal ( VTwo (1, 2) , Rpc.Enum [ Rpc.String "VTwo"; Rpc.Enum [ Rpc.Int 1L; Rpc.Int 2L ] ] , rpc_of_test_variant , test_variant_of_rpc ) let test_variant_case () = check_unmarshal_ok (Testable.from_rpc_of_t rpc_of_test_variant) test_variant_of_rpc VNone (Rpc.String "vnone") let test_bad_variant_case () = check_unmarshal_error test_variant_of_rpc (Rpc.Enum [ Rpc.String "vtwo"; Rpc.Int 5L ]) type test_variant_name = | VThree of int [@name "bob"] | VFour [@name "lofty"] [@@deriving rpc] let test_variant_name () = check_marshal_unmarshal ( VThree 5 , Rpc.Enum [ Rpc.String "bob"; Rpc.Int 5L ] , rpc_of_test_variant_name , test_variant_name_of_rpc ) let test_variant_name2 () = check_marshal_unmarshal (VFour, Rpc.String "lofty", rpc_of_test_variant_name, test_variant_name_of_rpc) type test_record = { fiEld1 : int ; fiEld2 : string } [@@deriving rpc] let test_record () = check_marshal_unmarshal ( { fiEld1 = 7; fiEld2 = "banana" } , Rpc.Dict [ "fiEld1", Rpc.Int 7L; "fiEld2", Rpc.String "banana" ] , rpc_of_test_record , test_record_of_rpc ) let test_record_case () = check_unmarshal_ok (Testable.from_rpc_of_t rpc_of_test_record) test_record_of_rpc { fiEld1 = 7; fiEld2 = "banana" } (Rpc.Dict [ "field1", Rpc.Int 7L; "FIELD2", Rpc.String "banana" ]) let test_bad_record () = check_unmarshal_error test_record_of_rpc (Rpc.Dict [ "field1", Rpc.Int 7L ]) type test_record_opt = { field3 : int option ; field4 : string option } [@@deriving rpc] let test_record_opt1 () = check_marshal_unmarshal ( { field3 = Some 7; field4 = Some "banana" } , Rpc.Dict [ "field3", Rpc.Int 7L; "field4", Rpc.String "banana" ] , rpc_of_test_record_opt , test_record_opt_of_rpc ) let test_record_opt2 () = check_marshal_unmarshal ( { field3 = Some 7; field4 = None } , Rpc.Dict [ "field3", Rpc.Int 7L ] , rpc_of_test_record_opt , test_record_opt_of_rpc ) let test_record_opt3 () = check_marshal_unmarshal ( { field3 = None; field4 = Some "hamster" } , Rpc.Dict [ "field4", Rpc.String "hamster" ] , 
rpc_of_test_record_opt , test_record_opt_of_rpc ) let test_record_opt4 () = check_marshal_unmarshal ( { field3 = None; field4 = None } , Rpc.Dict [] , rpc_of_test_record_opt , test_record_opt_of_rpc ) type test_record_attrs = { field5 : int [@key "foo"] } [@@deriving rpc] let test_record_attrs () = check_marshal_unmarshal ( { field5 = 6 } , Rpc.Dict [ "foo", Rpc.Int 6L ] , rpc_of_test_record_attrs , test_record_attrs_of_rpc ) type 'a test_poly = 'a list [@@deriving rpc] let test_poly () = let (x : int test_poly) = [ 1; 2; 3 ] in check_marshal_unmarshal ( x , Rpc.Enum [ Rpc.Int 1L; Rpc.Int 2L; Rpc.Int 3L ] , rpc_of_test_poly Rpc.rpc_of_int , test_poly_of_rpc Rpc.int_of_rpc ) type 'a myref = string [@@deriving rpc] type vdi_ref = [ `VDI ] myref [@@deriving rpc] type test_polyvar = [ `one | `two of int | `thRee of int * int ] [@@deriving rpc] let test_polyvar () = check_marshal_unmarshal (`one, Rpc.String "one", rpc_of_test_polyvar, test_polyvar_of_rpc) let test_polyvar2 () = check_marshal_unmarshal ( `two 2 , Rpc.Enum [ Rpc.String "two"; Rpc.Int 2L ] , rpc_of_test_polyvar , test_polyvar_of_rpc ) let test_polyvar3 () = check_marshal_unmarshal ( `thRee (4, 5) , Rpc.Enum [ Rpc.String "thRee"; Rpc.Enum [ Rpc.Int 4L; Rpc.Int 5L ] ] , rpc_of_test_polyvar , test_polyvar_of_rpc ) let test_polyvar_case () = check_unmarshal_ok (Testable.from_rpc_of_t rpc_of_test_polyvar) test_polyvar_of_rpc (`thRee (4, 5)) (Rpc.Enum [ Rpc.String "THREE"; Rpc.Enum [ Rpc.Int 4L; Rpc.Int 5L ] ]) type test_pvar_inherit = [ `four of string | test_polyvar ] [@@deriving rpc] let test_pvar_inherit () = check_marshal_unmarshal (`one, Rpc.String "one", rpc_of_test_pvar_inherit, test_pvar_inherit_of_rpc) let test_pvar_inherit2 () = check_marshal_unmarshal ( `four "apple" , Rpc.Enum [ Rpc.String "four"; Rpc.String "apple" ] , rpc_of_test_pvar_inherit , test_pvar_inherit_of_rpc ) type enum = ([ `x | `y | `z | `default ] [@default `default]) [@@deriving rpc] let test_default_enum () = check_unmarshal_ok 
(Testable.from_rpc_of_t rpc_of_enum) enum_of_rpc `default (Rpc.String "unknown_enum"); check_unmarshal_ok (Testable.from_rpc_of_t rpc_of_enum) enum_of_rpc `default (Rpc.Enum [ Rpc.String "thRee"; Rpc.Enum [ Rpc.Int 4L; Rpc.Int 5L ] ]); check_unmarshal_error enum_of_rpc (Rpc.Enum [ Rpc.Int 6L ]); check_unmarshal_error enum_of_rpc (Rpc.Dict [ "foo", Rpc.String "bar" ]); check_unmarshal_error enum_of_rpc (Rpc.Int 1L); check_unmarshal_error enum_of_rpc (Rpc.Float 1.0) type enum_string_map = (enum * string) list [@@deriving rpc] let test_enum_string_map () = check_marshal_unmarshal ( [ `x, "x"; `y, "y"; `z, "z" ] , Rpc.Dict [ "x", Rpc.String "x"; "y", Rpc.String "y"; "z", Rpc.String "z" ] , rpc_of_enum_string_map , enum_string_map_of_rpc ) type enum2 = [ `a | `b | `c ] [@@deriving rpc] type enum_string_map2 = ((enum2 * string) list[@dict]) [@@deriving rpc] let test_enum_string_map2 () = check_marshal_unmarshal ( [ `a, "x"; `b, "y"; `c, "z" ] , Rpc.Dict [ "a", Rpc.String "x"; "b", Rpc.String "y"; "c", Rpc.String "z" ] , rpc_of_enum_string_map2 , enum_string_map2_of_rpc ) let tests = [ "int", `Quick, test_int ; "int_from_string", `Quick, test_int_from_string ; "bad_int", `Quick, test_bad_int ; "bad_int_string", `Quick, test_bad_int_string ; "int32", `Quick, test_int32 ; "int32_from_string", `Quick, test_int32_from_string ; "bad_int32", `Quick, test_bad_int32 ; "bad_int32_string", `Quick, test_bad_int32_string ; "int64", `Quick, test_int64 ; "int64_from_string", `Quick, test_int64_from_string ; "bad_int64", `Quick, test_bad_int64 ; "bad_int64_string", `Quick, test_bad_int64_string ; "unit", `Quick, test_unit ; "bad_unit", `Quick, test_bad_unit ; "string", `Quick, test_string ; "bad_string", `Quick, test_bad_string ; "float", `Quick, test_float ; "float_from_int", `Quick, test_float_from_int ; "float_from_int32", `Quick, test_float_from_int32 ; "float_from_string", `Quick, test_float_from_string ; "bad_float", `Quick, test_bad_float ; "bad_float_string", `Quick, 
test_bad_float_string ; "bool", `Quick, test_bool ; "bad_bool", `Quick, test_bad_bool ; "char", `Quick, test_char ; "bad_char", `Quick, test_bad_char ; "int list", `Quick, test_int_list ; "int array", `Quick, test_int_array ; "dict", `Quick, test_dict ; "dict_key", `Quick, test_dict_key ; "tuple2", `Quick, test_tuple2 ; "tuple3", `Quick, test_tuple3 ; "option", `Quick, test_option ; "option (none)", `Quick, test_option_none ; "bad_option", `Quick, test_bad_option ; "constr", `Quick, test_constr ; "variant", `Quick, test_variant ; "variant1", `Quick, test_variant1 ; "variant2", `Quick, test_variant2 ; "variant_case", `Quick, test_variant_case ; "variant_name", `Quick, test_variant_name ; "variant_name2", `Quick, test_variant_name2 ; "bad_variant_case", `Quick, test_bad_variant_case ; "record", `Quick, test_record ; "record_case", `Quick, test_record_case ; "bad_record", `Quick, test_bad_record ; "record_opt1", `Quick, test_record_opt1 ; "record_opt2", `Quick, test_record_opt2 ; "record_opt3", `Quick, test_record_opt3 ; "record_opt4", `Quick, test_record_opt4 ; "record_attrs", `Quick, test_record_attrs ; "poly", `Quick, test_poly ; "polyvar", `Quick, test_polyvar ; "polyvar2", `Quick, test_polyvar2 ; "polyvar3", `Quick, test_polyvar3 ; "polyvar_case", `Quick, test_polyvar_case ; "pvar_inherit", `Quick, test_pvar_inherit ; "pvar_inherit2", `Quick, test_pvar_inherit2 ; "default_enum", `Quick, test_default_enum ; "enum_string_map", `Quick, test_enum_string_map ; "enum_string_map2", `Quick, test_enum_string_map2 ]
(* All the ppx tests *) (* Check that t -/-> of_rpc but t -> t_of_rpc *) type t = int [@@deriving rpc]
global_constants_storage.ml
open Michelson_v1_primitives (* See [expand] for an example. TODO: https://gitlab.com/tezos/tezos/-/issues/1609 Move function to lib_micheline. On our next opportunity to update the environment, we should move this function to lib_micheline. *) let bottom_up_fold_cps initial_accumulator node initial_k f = let rec traverse_node accu node k = f accu node @@ fun accu node -> match node with | String _ | Int _ | Bytes _ -> k accu node | Prim (loc, prim, args, annot) -> (traverse_nodes [@ocaml.tailcall]) accu args @@ fun accu args -> f accu (Prim (loc, prim, args, annot)) k | Seq (loc, elts) -> (traverse_nodes [@ocaml.tailcall]) accu elts @@ fun accu elts -> f accu (Seq (loc, elts)) k and traverse_nodes accu nodes k = match nodes with | [] -> k accu [] | node :: nodes -> (traverse_node [@ocaml.tailcall]) accu node @@ fun accu node -> (traverse_nodes [@ocaml.tailcall]) accu nodes @@ fun accu nodes -> k accu (node :: nodes) in traverse_node initial_accumulator node initial_k [@@coq_axiom_with_reason "local mutually recursive definition not handled"] module Gas_costs = Global_constants_costs module Expr_hash_map = Map.Make (Script_expr_hash) type error += Expression_too_deep type error += Expression_already_registered type error += Badly_formed_constant_expression type error += Nonexistent_global type error += Expression_too_large let () = let description = "Attempted to register an expression that, after fully expanding all \ referenced global constants, would result in too many levels of nesting." in register_error_kind `Branch ~id:"Expression_too_deep" ~title:"Expression too deep" ~description ~pp:(fun ppf () -> Format.fprintf ppf "%s" description) Data_encoding.empty (function Expression_too_deep -> Some () | _ -> None) (fun () -> Expression_too_deep) ; let description = "Attempted to register an expression as global constant that has already \ been registered." 
in register_error_kind `Branch ~id:"Expression_already_registered" ~title:"Expression already registered" ~description ~pp:(fun ppf () -> Format.fprintf ppf "%s" description) Data_encoding.empty (function Expression_already_registered -> Some () | _ -> None) (fun () -> Expression_already_registered) ; let description = "Found a badly formed constant expression. The 'constant' primitive must \ always be followed by a string of the hash of the expression it points \ to." in register_error_kind `Branch ~id:"Badly_formed_constant_expression" ~title:"Badly formed constant expression" ~description ~pp:(fun ppf () -> Format.fprintf ppf "%s" description) Data_encoding.empty (function Badly_formed_constant_expression -> Some () | _ -> None) (fun () -> Badly_formed_constant_expression) ; let description = "No registered global was found at the given hash in storage." in register_error_kind `Branch ~id:"Nonexistent_global" ~title:"Tried to look up nonexistent global" ~description ~pp:(fun ppf () -> Format.fprintf ppf "%s" description) Data_encoding.empty (function Nonexistent_global -> Some () | _ -> None) (fun () -> Nonexistent_global) ; let description = "Encountered an expression that, after expanding all constants, is larger \ than the expression size limit." in register_error_kind `Branch ~id:"Expression_too_large" ~title:"Expression too large" ~description ~pp:(fun ppf () -> Format.fprintf ppf "%s" description) Data_encoding.empty (function Expression_too_large -> Some () | _ -> None) (fun () -> Expression_too_large) let get context hash = Storage.Global_constants.Map.find context hash >>=? fun (context, value) -> match value with | None -> fail Nonexistent_global | Some value -> return (context, value) let expr_to_address_in_context context expr = let lexpr = Script_repr.lazy_expr expr in Raw_context.consume_gas context @@ Script_repr.force_bytes_cost lexpr >>? fun context -> Script_repr.force_bytes lexpr >>? 
fun b -> Raw_context.consume_gas context @@ Gas_costs.expr_to_address_in_context_cost b >|? fun context -> (context, Script_expr_hash.hash_bytes [b]) let node_too_large node = let node_size = Script_repr.Micheline_size.of_node node in let nodes = Saturation_repr.to_int node_size.nodes in let string_bytes = Saturation_repr.to_int node_size.string_bytes in let z_bytes = Saturation_repr.to_int node_size.z_bytes in Compare.Int.( nodes > Constants_repr.max_micheline_node_count || string_bytes + z_bytes > Constants_repr.max_micheline_bytes_limit) let expand_node context node = (* We charge for traversing the top-level node at the beginning. Inside the loop, we charge for traversing each new constant that gets expanded. *) Raw_context.consume_gas context (Gas_costs.expand_no_constants_branch_cost node) >>?= fun context -> bottom_up_fold_cps (* We carry a Boolean representing whether we had to do any expansions or not. *) (context, Expr_hash_map.empty, false) node (fun (context, _, did_expansion) node -> return (context, node, did_expansion)) (fun (context, map, did_expansion) node k -> match node with | Prim (_, H_constant, args, annot) -> ( (* Charge for validating the b58check hash. *) Raw_context.consume_gas context Gas_costs.expand_constants_branch_cost >>?= fun context -> match (args, annot) with (* A constant Prim should always have a single String argument, being a properly formatted hash. *) | ([String (_, address)], []) -> ( match Script_expr_hash.of_b58check_opt address with | None -> fail Badly_formed_constant_expression | Some hash -> ( match Expr_hash_map.find hash map with | Some node -> (* Charge traversing the newly retrieved node *) Raw_context.consume_gas context (Gas_costs.expand_no_constants_branch_cost node) >>?= fun context -> k (context, map, true) node | None -> get context hash >>=? 
fun (context, expr) -> (* Charge traversing the newly retrieved node *) let node = root expr in Raw_context.consume_gas context (Gas_costs.expand_no_constants_branch_cost node) >>?= fun context -> k (context, Expr_hash_map.add hash node map, true) node)) | _ -> fail Badly_formed_constant_expression) | Int _ | String _ | Bytes _ | Prim _ | Seq _ -> k (context, map, did_expansion) node) >>=? fun (context, node, did_expansion) -> if did_expansion then (* Gas charged during expansion is at least proportional to the size of the resulting node so the execution time of [node_too_large] is already covered. *) if node_too_large node then fail Expression_too_large else return (context, node) else return (context, node) let expand context expr = expand_node context (root expr) >|=? fun (context, node) -> (context, strip_locations node) (** Computes the maximum depth of a Micheline node. Fails with [Expression_too_deep] if greater than [max_allowed_global_constant_depth].*) let check_depth node = let rec advance node depth k = if Compare.Int.(depth > Constants_repr.max_allowed_global_constant_depth) then error Expression_too_deep else match node with | Int _ | String _ | Bytes _ | Prim (_, _, [], _) | Seq (_, []) -> (k [@tailcall]) (depth + 1) | Prim (loc, _, hd :: tl, _) | Seq (loc, hd :: tl) -> (advance [@tailcall]) hd (depth + 1) (fun dhd -> (advance [@tailcall]) (* Because [depth] doesn't care about the content of the expression, we can safely throw away information about primitives and replace them with the [Seq] constructor.*) (Seq (loc, tl)) depth (fun dtl -> (k [@tailcall]) (Compare.Int.max dhd dtl))) in advance node 0 (fun x -> Ok x) let register context value = (* To calculate the total depth, we first expand all constants in the expression. This may fail with [Expression_too_large]. Though the stored expression is the unexpanded version. *) expand_node context (root value) >>=? fun (context, node) -> (* We do not need to carbonate [check_depth]. 
[expand_node] and [Storage.Global_constants.Map.init] are already carbonated with gas at least proportional to the size of the expanded node and the computation cost of [check_depth] is of the same order. *) check_depth node >>?= fun (_depth : int) -> expr_to_address_in_context context value >>?= fun (context, key) -> trace Expression_already_registered @@ Storage.Global_constants.Map.init context key value >|=? fun (context, size) -> (context, key, Z.of_int size) module Internal_for_tests = struct let node_too_large = node_too_large let bottom_up_fold_cps = bottom_up_fold_cps let expr_to_address_in_context = expr_to_address_in_context end
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2021 Marigold <team@marigold.dev> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************) open Micheline
stdLabels.mli
(* Labeled interfaces to the standard library's [Array], [List] and
   [String] modules: the same operations as the unlabeled originals,
   with labels ([~f], [~pos], [~len], [~src], [~dst], ...) on
   higher-order and positional arguments.
   NOTE(review): this is a legacy-era interface — it still exposes
   mutable-string operations ([String.set], [String.create],
   [String.unsafe_set]) and pre-4.x names ([uppercase], [lowercase]). *)

(** Labeled array operations. *)
module Array : sig
  external length : 'a array -> int = "%array_length"
  external get : 'a array -> int -> 'a = "%array_safe_get"
  external set : 'a array -> int -> 'a -> unit = "%array_safe_set"
  external make : int -> 'a -> 'a array = "caml_make_vect"
  (* [create] is an alias for [make] (same primitive). *)
  external create : int -> 'a -> 'a array = "caml_make_vect"
  val init : int -> f:(int -> 'a) -> 'a array
  val make_matrix : dimx:int -> dimy:int -> 'a -> 'a array array
  val create_matrix : dimx:int -> dimy:int -> 'a -> 'a array array
  val append : 'a array -> 'a array -> 'a array
  val concat : 'a array list -> 'a array
  val sub : 'a array -> pos:int -> len:int -> 'a array
  val copy : 'a array -> 'a array
  val fill : 'a array -> pos:int -> len:int -> 'a -> unit
  val blit :
    src:'a array -> src_pos:int -> dst:'a array -> dst_pos:int ->
    len:int -> unit
  val to_list : 'a array -> 'a list
  val of_list : 'a list -> 'a array
  val iter : f:('a -> unit) -> 'a array -> unit
  val map : f:('a -> 'b) -> 'a array -> 'b array
  val iteri : f:(int -> 'a -> unit) -> 'a array -> unit
  val mapi : f:(int -> 'a -> 'b) -> 'a array -> 'b array
  val fold_left : f:('a -> 'b -> 'a) -> init:'a -> 'b array -> 'a
  val fold_right : f:('a -> 'b -> 'b) -> 'a array -> init:'b -> 'b
  (* In-place sorts; [fast_sort] picks whichever of [sort]/[stable_sort]
     is faster on typical input. *)
  val sort : cmp:('a -> 'a -> int) -> 'a array -> unit
  val stable_sort : cmp:('a -> 'a -> int) -> 'a array -> unit
  val fast_sort : cmp:('a -> 'a -> int) -> 'a array -> unit
  (* Unchecked accessors: no bounds check, undefined behavior when out
     of range. *)
  external unsafe_get : 'a array -> int -> 'a = "%array_unsafe_get"
  external unsafe_set : 'a array -> int -> 'a -> unit = "%array_unsafe_set"
end

(** Labeled list operations. *)
module List : sig
  val length : 'a list -> int
  val hd : 'a list -> 'a
  val tl : 'a list -> 'a list
  val nth : 'a list -> int -> 'a
  val rev : 'a list -> 'a list
  val append : 'a list -> 'a list -> 'a list
  val rev_append : 'a list -> 'a list -> 'a list
  val concat : 'a list list -> 'a list
  val flatten : 'a list list -> 'a list
  val iter : f:('a -> unit) -> 'a list -> unit
  val map : f:('a -> 'b) -> 'a list -> 'b list
  val rev_map : f:('a -> 'b) -> 'a list -> 'b list
  val fold_left : f:('a -> 'b -> 'a) -> init:'a -> 'b list -> 'a
  val fold_right : f:('a -> 'b -> 'b) -> 'a list -> init:'b -> 'b
  (* Two-list variants: both lists must have the same length. *)
  val iter2 : f:('a -> 'b -> unit) -> 'a list -> 'b list -> unit
  val map2 : f:('a -> 'b -> 'c) -> 'a list -> 'b list -> 'c list
  val rev_map2 : f:('a -> 'b -> 'c) -> 'a list -> 'b list -> 'c list
  val fold_left2 :
    f:('a -> 'b -> 'c -> 'a) -> init:'a -> 'b list -> 'c list -> 'a
  val fold_right2 :
    f:('a -> 'b -> 'c -> 'c) -> 'a list -> 'b list -> init:'c -> 'c
  val for_all : f:('a -> bool) -> 'a list -> bool
  val exists : f:('a -> bool) -> 'a list -> bool
  val for_all2 : f:('a -> 'b -> bool) -> 'a list -> 'b list -> bool
  val exists2 : f:('a -> 'b -> bool) -> 'a list -> 'b list -> bool
  (* [mem] uses structural equality, [memq] physical equality. *)
  val mem : 'a -> set:'a list -> bool
  val memq : 'a -> set:'a list -> bool
  val find : f:('a -> bool) -> 'a list -> 'a
  val filter : f:('a -> bool) -> 'a list -> 'a list
  val find_all : f:('a -> bool) -> 'a list -> 'a list
  val partition : f:('a -> bool) -> 'a list -> ('a list * 'a list)
  (* Association-list operations; [assq]/[memq] variants use physical
     equality on keys. *)
  val assoc : 'a -> ('a * 'b) list -> 'b
  val assq : 'a -> ('a * 'b) list -> 'b
  val mem_assoc : 'a -> map:('a * 'b) list -> bool
  val mem_assq : 'a -> map:('a * 'b) list -> bool
  val remove_assoc : 'a -> ('a * 'b) list -> ('a * 'b) list
  val remove_assq : 'a -> ('a * 'b) list -> ('a * 'b) list
  val split : ('a * 'b) list -> ('a list * 'b list)
  val combine : 'a list -> 'b list -> ('a * 'b) list
  val sort : cmp:('a -> 'a -> int) -> 'a list -> 'a list
  val stable_sort : cmp:('a -> 'a -> int) -> 'a list -> 'a list
  val fast_sort : cmp:('a -> 'a -> int) -> 'a list -> 'a list
  val merge : cmp:('a -> 'a -> int) -> 'a list -> 'a list -> 'a list
end

(** Labeled string operations (legacy mutable-string era). *)
module String : sig
  external length : string -> int = "%string_length"
  external get : string -> int -> char = "%string_safe_get"
  external set : string -> int -> char -> unit = "%string_safe_set"
  external create : int -> string = "caml_create_string"
  val make : int -> char -> string
  val copy : string -> string
  val sub : string -> pos:int -> len:int -> string
  val fill : string -> pos:int -> len:int -> char -> unit
  val blit :
    src:string -> src_pos:int -> dst:string -> dst_pos:int ->
    len:int -> unit
  val concat : sep:string -> string list -> string
  val iter : f:(char -> unit) -> string -> unit
  val escaped : string -> string
  (* Searches raise [Not_found] when the character is absent. *)
  val index : string -> char -> int
  val rindex : string -> char -> int
  val index_from : string -> int -> char -> int
  val rindex_from : string -> int -> char -> int
  val contains : string -> char -> bool
  val contains_from : string -> int -> char -> bool
  val rcontains_from : string -> int -> char -> bool
  val uppercase : string -> string
  val lowercase : string -> string
  val capitalize : string -> string
  val uncapitalize : string -> string
  type t = string
  val compare : t -> t -> int
  (* Unchecked accessors: no bounds check. *)
  external unsafe_get : string -> int -> char = "%string_unsafe_get"
  external unsafe_set : string -> int -> char -> unit = "%string_unsafe_set"
  external unsafe_blit :
    src:string -> src_pos:int -> dst:string -> dst_pos:int ->
    len:int -> unit = "caml_blit_string" "noalloc"
  external unsafe_fill :
    string -> pos:int -> len:int -> char -> unit
    = "caml_fill_string" "noalloc"
end
dune
; File auto-generated by gentests.ml ; Auto-generated part begin ; Test for float.smt2 ; Incremental test (rule (target float.incremental) (deps (:input float.smt2)) (package dolmen_bin) (action (chdir %{workspace_root} (with-outputs-to %{target} (with-accepted-exit-codes (or 0 (not 0)) (run dolmen --mode=incremental --color=never %{input} %{read-lines:flags.dune})))))) (rule (alias runtest) (package dolmen_bin) (action (diff float.expected float.incremental))) ; Full mode test (rule (target float.full) (deps (:input float.smt2)) (package dolmen_bin) (action (chdir %{workspace_root} (with-outputs-to %{target} (with-accepted-exit-codes (or 0 (not 0)) (run dolmen --mode=full --color=never %{input} %{read-lines:flags.dune})))))) (rule (alias runtest) (package dolmen_bin) (action (diff float.expected float.full))) ; Auto-generated part end
dune
; Generator executables and build rules for the sys-socket-unix C
; stubs (ctypes stub-generation workflow: OCaml generator emits C,
; the C is compiled and run to produce bindings).
; NOTE(review): there is both an (executable (name gen_types_c))
; producing gen_types_c.exe and a (rule (targets gen_types_c))
; producing a native binary from the generated C — confirm the two
; targets do not clash; same pattern for gen_constants_c.

(executable
 (name gen_stubs)
 (modules gen_stubs)
 (libraries sys-socket-unix.stubs ctypes.stubs))

(executable
 (name gen_types_c)
 (modules gen_types_c)
 (libraries sys-socket-unix.types ctypes.stubs))

; Emit gen_types.c by running the generator executable.
(rule
 (targets gen_types.c)
 (deps (:gen ./gen_types_c.exe))
 (action (run %{gen} %{targets})))

; Compile the generated C with the configured C compiler against the
; ctypes and OCaml runtime headers.
(rule
 (targets gen_types_c)
 (deps (:c_code ./gen_types.c))
 (action
  (run %{ocaml-config:c_compiler} -I %{lib:ctypes:} -I
   %{ocaml-config:standard_library} -o %{targets} %{c_code})))

(executable
 (name gen_constants_c)
 (modules gen_constants_c)
 (libraries sys-socket-unix.constants ctypes.stubs))

; Emit gen_constants.c by running the generator executable.
(rule
 (targets gen_constants.c)
 (deps (:gen ./gen_constants_c.exe))
 (action (run %{gen} %{targets})))

; Compile the generated constants C file.
(rule
 (targets gen_constants_c)
 (deps (:c_code ./gen_constants.c))
 (action
  (run %{ocaml-config:c_compiler} -I %{lib:ctypes:} -I
   %{ocaml-config:standard_library} -o %{targets} %{c_code})))
baking_nonces.ml
(* Persistence and revelation of seed nonces committed by a Tezos baker.
   Nonces are stored in the client wallet keyed by the hash of the block
   that committed them; a background worker reveals them at the start of
   the following cycle. *)

open Protocol
open Alpha_context
module Events = Baking_events.Nonces

(* Worker state; [last_predecessor] is mutated as new proposals arrive
   so that revelation is attempted at most once per new predecessor. *)
type state = {
  cctxt : Protocol_client_context.full;
  chain : Chain_services.chain;
  constants : Constants.t;
  config : Baking_configuration.nonce_config;
  nonces_location : [`Nonce] Baking_files.location;
  mutable last_predecessor : Block_hash.t;
}

type t = state

(* In-memory nonce store: committing block hash -> nonce. *)
type nonces = Nonce.t Block_hash.Map.t

let empty = Block_hash.Map.empty

(* JSON/binary encoding of the store as a list of (block, nonce) pairs. *)
let encoding =
  let open Data_encoding in
  def "seed_nonce"
  @@ conv
       (fun m ->
         Block_hash.Map.fold (fun hash nonce acc -> (hash, nonce) :: acc) m [])
       (fun l ->
         List.fold_left
           (fun map (hash, nonce) -> Block_hash.Map.add hash nonce map)
           Block_hash.Map.empty
           l)
  @@ list (obj2 (req "block" Block_hash.encoding) (req "nonce" Nonce.encoding))

(* One-shot migration from the legacy "nonces" wallet file to the
   per-chain location (filename + "s"); copies only when the new file
   does not exist yet. *)
let may_migrate (wallet : Protocol_client_context.full) location =
  let base_dir = wallet#get_base_dir in
  let current_file =
    Filename.Infix.((base_dir // Baking_files.filename location) ^ "s")
  in
  Lwt_unix.file_exists current_file >>= function
  | true -> (* Migration already occurred *) Lwt.return_unit
  | false -> (
      let legacy_file = Filename.Infix.(base_dir // "nonces") in
      Lwt_unix.file_exists legacy_file >>= function
      | false -> (* Do nothing *) Lwt.return_unit
      | true -> Lwt_utils_unix.copy_file ~src:legacy_file ~dst:current_file)

(* Load the nonce store from the wallet (empty map when absent). *)
let load (wallet : #Client_context.wallet) location =
  wallet#load (Baking_files.filename location) ~default:empty encoding

(* Persist the nonce store back to the wallet. *)
let save (wallet : #Client_context.wallet) location nonces =
  wallet#write (Baking_files.filename location) nonces encoding

(* Plain map wrappers over the store. *)
let mem nonces hash = Block_hash.Map.mem hash nonces

let find_opt nonces hash = Block_hash.Map.find hash nonces

let add nonces hash nonce = Block_hash.Map.add hash nonce nonces

let remove nonces hash = Block_hash.Map.remove hash nonces

(* Remove from [nonces] every key present in [nonces_to_remove]. *)
let remove_all nonces nonces_to_remove =
  Block_hash.Map.fold
    (fun hash _ acc -> remove acc hash)
    nonces_to_remove
    nonces

(* Fetch a block's level via the shell RPC; logs and returns [None] on
   failure instead of propagating the error. *)
let get_block_level_opt cctxt ~chain ~block =
  Shell_services.Blocks.Header.shell_header cctxt ~chain ~block ()
  >>= function
  | Ok {level; _} -> Lwt.return_some level
  | Error errs ->
      Events.(
        emit
          cant_retrieve_block_header_for_nonce
          (Block_services.to_string block, errs))
      >>= fun () -> Lwt.return_none

(* Split the store into ([orphans], [outdated]):
   - [orphans]: nonces whose committing block's header cannot be fetched;
   - [outdated]: nonces committed more than [preserved_cycles] cycles ago.
   Returns (empty, empty) when the head level itself cannot be fetched. *)
let get_outdated_nonces {cctxt; constants; chain; _} nonces =
  let {Constants.parametric = {blocks_per_cycle; preserved_cycles; _}; _} =
    constants
  in
  get_block_level_opt cctxt ~chain ~block:(`Head 0) >>= function
  | None ->
      Events.(emit cannot_fetch_chain_head_level ()) >>= fun () ->
      return (empty, empty)
  | Some current_level ->
      (* Cycle arithmetic on int32 levels. *)
      let current_cycle = Int32.(div current_level blocks_per_cycle) in
      let is_older_than_preserved_cycles block_level =
        let block_cycle = Int32.(div block_level blocks_per_cycle) in
        Int32.sub current_cycle block_cycle > Int32.of_int preserved_cycles
      in
      Block_hash.Map.fold
        (fun hash nonce acc ->
          acc >>=? fun (orphans, outdated) ->
          get_block_level_opt cctxt ~chain ~block:(`Hash (hash, 0))
          >>= function
          | Some level ->
              if is_older_than_preserved_cycles level then
                return (orphans, add outdated hash nonce)
              else acc
          | None -> return (add orphans hash nonce, outdated))
        nonces
        (return (empty, empty))

(* Drop outdated nonces from the store; warn (threshold 50) when many
   orphans accumulate, since they are never cleaned up automatically. *)
let filter_outdated_nonces state nonces =
  get_outdated_nonces state nonces >>=? fun (orphans, outdated_nonces) ->
  when_
    (Block_hash.Map.cardinal orphans >= 50)
    (fun () ->
      Events.(
        emit too_many_nonces (Baking_files.filename state.nonces_location ^ "s"))
      >>= fun () -> return_unit)
  >>=? fun () -> return (remove_all nonces outdated_nonces)

(* List the block hashes of the cycle containing [block] (shifted by
   [offset] cycles), from [block] back to the cycle's first level.
   Returns [] when the cycle-levels RPC reports Not_found. *)
let blocks_from_current_cycle {cctxt; chain; _} block ?(offset = 0l) () =
  Plugin.RPC.levels_in_current_cycle cctxt ~offset (chain, block)
  >>= function
  | Error (Tezos_rpc.Context.Not_found _ :: _) -> return_nil
  | Error _ as err -> Lwt.return err
  | Ok (first, last) -> (
      Shell_services.Blocks.hash cctxt ~chain ~block () >>=? fun hash ->
      Shell_services.Blocks.Header.shell_header cctxt ~chain ~block ()
      >>=? fun {level; _} ->
      (* FIXME: crappy algorithm, change this *)
      (* Compute how many blocks below current level we should ask for *)
      let length = Int32.to_int (Int32.sub level (Raw_level.to_int32 first)) in
      Shell_services.Blocks.list cctxt ~chain ~heads:[hash] ~length ()
      (* Looks like this function call retrieves a list of blocks ordered
         from latest to earliest - decreasing order of insertion in the
         chain *)
      >>=? function
      | [blocks] ->
          if Int32.equal level (Raw_level.to_int32 last) then
            (* We have just retrieved a block list of the right size
               starting at first until last *)
            return blocks
          else
            (* Remove all the latest blocks from last up to length*)
            List.drop_n
              (length - Int32.to_int (Raw_level.diff last first))
              blocks
            |> return
      | l ->
          failwith
            "Baking_nonces.blocks_from_current_cycle: unexpected block list of \
             size %d (expected 1)"
            (List.length l))

(* For the previous cycle (offset -1), return the (level, nonce) pairs
   whose commitment is still [Missing] on chain and whose hash matches
   the stored nonce; incoherent, forgotten or already-revealed entries
   are filtered out. *)
let get_unrevealed_nonces ({cctxt; chain; _} as state) nonces =
  blocks_from_current_cycle state (`Head 0) ~offset:(-1l) ()
  >>=? fun blocks ->
  List.filter_map_es
    (fun hash ->
      match find_opt nonces hash with
      | None -> return_none
      | Some nonce -> (
          get_block_level_opt cctxt ~chain ~block:(`Hash (hash, 0))
          >>= function
          | Some level -> (
              Lwt.return (Environment.wrap_tzresult (Raw_level.of_int32 level))
              >>=? fun level ->
              Alpha_services.Nonce.get cctxt (chain, `Head 0) level
              >>=? function
              | Missing nonce_hash when Nonce.check_hash nonce nonce_hash ->
                  Events.(
                    emit found_nonce_to_reveal (hash, Raw_level.to_int32 level))
                  >>= fun () -> return_some (level, nonce)
              | Missing _nonce_hash ->
                  (* Stored nonce does not match the on-chain commitment. *)
                  Events.(emit incoherent_nonce (Raw_level.to_int32 level))
                  >>= fun () -> return_none
              | Forgotten -> return_none
              | Revealed _ -> return_none)
          | None -> return_none))
    blocks

(* Nonce creation *)

(* Produce a fresh (nonce_hash, nonce) pair for [level]:
   - Deterministic: derived from the delegate's key and the level, so the
     same nonce can be regenerated after a crash;
   - Random: fresh randomness (assumes Rand.generate output is always a
     valid nonce, hence the [assert false]). *)
let generate_seed_nonce (nonce_config : Baking_configuration.nonce_config)
    (delegate : Baking_state.consensus_key) level =
  (match nonce_config with
  | Deterministic ->
      let data = Data_encoding.Binary.to_bytes_exn Raw_level.encoding level in
      Client_keys_v0.deterministic_nonce delegate.secret_key_uri data
      >>=? fun nonce ->
      return (Data_encoding.Binary.of_bytes_exn Nonce.encoding nonce)
  | Random -> (
      match
        Nonce.of_bytes (Tezos_crypto.Rand.generate Constants.nonce_length)
      with
      | Error _errs -> assert false
      | Ok nonce -> return nonce))
  >>=? fun nonce -> return (Nonce.hash nonce, nonce)

(* Record [nonce] for [block_hash] in the wallet store, under the
   client lock to avoid concurrent writers. *)
let register_nonce (cctxt : #Protocol_client_context.full) ~chain_id block_hash
    nonce =
  Events.(emit registering_nonce block_hash) >>= fun () ->
  (* Register the nonce *)
  let nonces_location = Baking_files.resolve_location ~chain_id `Nonce in
  cctxt#with_lock @@ fun () ->
  load cctxt nonces_location >>=? fun nonces ->
  let nonces = add nonces block_hash nonce in
  save cctxt nonces_location nonces >>=? fun () -> return_unit

(* Forge, sign-pad (zero signature: revelations are unsigned) and inject
   one seed-nonce-revelation operation per (level, nonce) pair. *)
let inject_seed_nonce_revelation (cctxt : #Protocol_client_context.full) ~chain
    ~block ~branch nonces =
  match nonces with
  | [] -> Events.(emit nothing_to_reveal branch) >>= fun () -> return_unit
  | _ ->
      List.iter_es
        (fun (level, nonce) ->
          Plugin.RPC.Forge.seed_nonce_revelation
            cctxt
            (chain, block)
            ~branch
            ~level
            ~nonce
            ()
          >>=? fun bytes ->
          let bytes =
            Tezos_crypto.Signature.V0.concat
              bytes
              Tezos_crypto.Signature.V0.zero
          in
          Shell_services.Injection.operation ~async:true cctxt ~chain bytes
          >>=? fun oph ->
          Events.(
            emit
              revealing_nonce
              (Raw_level.to_int32 level, Chain_services.to_string chain, oph))
          >>= fun () -> return_unit)
        nonces

(** [reveal_potential_nonces] reveal registered nonces *)
let reveal_potential_nonces state new_proposal =
  let {cctxt; chain; nonces_location; last_predecessor; _} = state in
  let new_predecessor_hash = new_proposal.Baking_state.predecessor.hash in
  if
    Block_hash.(last_predecessor <> new_predecessor_hash)
    && Protocol_hash.(new_proposal.predecessor.protocol = Protocol.hash)
  then (
    (* only try revealing nonces when the proposal's predecessor is a new
       one *)
    state.last_predecessor <- new_predecessor_hash ;
    let block = `Head 0 in
    let branch = new_predecessor_hash in
    (* improve concurrency *)
    cctxt#with_lock @@ fun () ->
    load cctxt nonces_location >>= function
    | Error err ->
        (* All failures below are logged and swallowed: the worker must
           keep running even when one revelation round fails. *)
        Events.(emit cannot_read_nonces err) >>= fun () -> return_unit
    | Ok nonces -> (
        get_unrevealed_nonces state nonces >>= function
        | Error err ->
            Events.(emit cannot_retrieve_unrevealed_nonces err) >>= fun () ->
            return_unit
        | Ok [] -> return_unit
        | Ok nonces_to_reveal -> (
            inject_seed_nonce_revelation
              cctxt
              ~chain
              ~block
              ~branch
              nonces_to_reveal
            >>= function
            | Error err ->
                Events.(emit cannot_inject_nonces err) >>= fun () ->
                return_unit
            | Ok () ->
                (* If some nonces are to be revealed it means:
                   - We entered a new cycle and we can clear old nonces ;
                   - A revelation was not included yet in the cycle
                     beginning.
                   So, it is safe to only filter outdated_nonces there *)
                filter_outdated_nonces state nonces >>=? fun live_nonces ->
                save cctxt nonces_location live_nonces >>=? fun () ->
                return_unit)))
  else return_unit

(* We suppose that the block stream is cloned by the caller *)

(* Spawn the background revelation worker: migrates the legacy store,
   then consumes [block_stream], attempting revelation on each new
   proposal until the stream closes or the returned canceler fires. *)
let start_revelation_worker cctxt config chain_id constants block_stream =
  let nonces_location = Baking_files.resolve_location ~chain_id `Nonce in
  may_migrate cctxt nonces_location >>= fun () ->
  let chain = `Hash chain_id in
  let canceler = Lwt_canceler.create () in
  let should_shutdown = ref false in
  let state =
    {
      cctxt;
      chain;
      constants;
      config;
      nonces_location;
      last_predecessor = Block_hash.zero;
    }
  in
  let rec worker_loop () =
    Lwt_canceler.on_cancel canceler (fun () ->
        should_shutdown := true ;
        Lwt.return_unit) ;
    Lwt_stream.get block_stream >>= function
    | None ->
        (* The head stream closed meaning that the connection with the
           node was interrupted: exit *)
        return_unit
    | Some new_proposal ->
        if !should_shutdown then return_unit
        else
          reveal_potential_nonces state new_proposal >>=? fun () ->
          worker_loop ()
  in
  Lwt.dont_wait
    (fun () ->
      Lwt.finalize
        (fun () ->
          Events.(emit revelation_worker_started ()) >>= fun () ->
          worker_loop () >>= fun _ ->
          (* never ending loop *) Lwt.return_unit)
        (fun () -> (* TODO *) Lwt.return_unit))
    (fun _exn -> ()) ;
  Lwt.return canceler
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2021 Nomadic Labs <contact@nomadic-labs.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
json.ml
(* Thin JSON layer over Ezjsonm: streaming serializers feeding textual
   fragments to a callback, plus string/buffer conversion helpers. *)
include Ezjsonm

(* Apply [f] to every list element and run [o] between two consecutive
   elements (never before the first nor after the last). *)
let rec list_iter_between f o = function
  | [] -> ()
  | [x] -> f x
  | x :: rest ->
      f x;
      o ();
      list_iter_between f o rest

(* Return [s] wrapped in double quotes with JSON special characters
   escaped. *)
let escape_string s =
  let b = Buffer.create 80 in
  Buffer.add_char b '"';
  String.iter
    (fun c ->
      match c with
      | '\n' -> Buffer.add_string b "\\n"
      | '\t' -> Buffer.add_string b "\\t"
      | '\r' -> Buffer.add_string b "\\r"
      | '\b' -> Buffer.add_string b "\\b"
      | '\\' -> Buffer.add_string b "\\\\"
      | '/' -> Buffer.add_string b "\\/"
      | '"' -> Buffer.add_string b "\\\""
      | '\x0c' -> Buffer.add_string b "\\f"
      | other -> Buffer.add_char b other)
    s;
  Buffer.add_char b '"';
  Buffer.contents b

(* Serialize [t] compactly, pushing successive fragments to [emit]. *)
let rec to_fct t emit =
  match t with
  | `Bool b -> emit (string_of_bool b)
  | `Float r -> emit (Printf.sprintf "%g" r)
  | `String s -> emit (escape_string s)
  | `Null -> emit "null"
  | `A elems ->
      emit "[";
      list_iter_between (fun e -> to_fct e emit) (fun () -> emit ", ") elems;
      emit "]"
  | `O fields ->
      emit "{";
      list_iter_between
        (fun (k, v) ->
          to_fct (`String k) emit;
          emit ": ";
          to_fct v emit)
        (fun () -> emit ", ")
        fields;
      emit "}"

(* "Human" variant: same output as [to_fct] except that a top-level
   array/object gets extra spacing and a trailing newline; nested values
   are still rendered compactly via [to_fct]. *)
let to_fct_hum t emit =
  match t with
  | `Bool b -> emit (string_of_bool b)
  | `Float r -> emit (Printf.sprintf "%g" r)
  | `String s -> emit (escape_string s)
  | `Null -> emit "null"
  | `A elems ->
      emit "[ ";
      list_iter_between (fun e -> to_fct e emit) (fun () -> emit ", ") elems;
      emit " ]\n"
  | `O fields ->
      emit "{";
      list_iter_between
        (fun (k, v) ->
          to_fct (`String k) emit;
          emit ": ";
          to_fct v emit)
        (fun () -> emit ", ")
        fields;
      emit "}\n"

(* Append the compact serialization of [v] to [buf]. *)
let to_buffer v buf = to_fct v (Buffer.add_string buf)

(* Compact string form (delegated to Ezjsonm). *)
let to_string v = Ezjsonm.to_string ~minify:true (wrap v)

(* Append the human-readable serialization of [v] to [buf]. *)
let to_buffer_hum v buf = to_fct_hum v (Buffer.add_string buf)

(* Pretty-printed string form (delegated to Ezjsonm). *)
let to_string_hum v = Ezjsonm.to_string ~minify:false (wrap v)

(* Parse a JSON string (delegated to Ezjsonm). *)
let of_string s = unwrap (Ezjsonm.from_string s)

exception Runtime_error of string * value
(* * Copyright (C) 2006-2009 Citrix Systems Inc. * Copyright (c) 2010 Thomas Gazagnaire <thomas@gazagnaire.org> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. *)
sha256.ml
(* SHA-256 digests computed by C stubs.
   [ctx] is a mutable hashing context, [t] an abstract digest value,
   [buf] a bigarray of bytes that can be hashed without copying. *)
type ctx

type buf = (int, Bigarray.int8_unsigned_elt, Bigarray.c_layout) Bigarray.Array1.t

type t

external init: unit -> ctx = "stub_sha256_init"
(* Unchecked: the C side trusts [ofs]/[len]; callers must validate. *)
external unsafe_update_substring: ctx -> string -> int -> int -> unit = "stub_sha256_update"
external update_buffer: ctx -> buf -> unit = "stub_sha256_update_bigarray"
external finalize: ctx -> t = "stub_sha256_finalize"
external copy : ctx -> ctx = "stub_sha256_copy"
external to_bin: t -> string = "stub_sha256_to_bin"
external to_hex: t -> string = "stub_sha256_to_hex"
external of_bin: bytes -> t = "stub_sha256_of_bin"
external of_hex: string -> t = "stub_sha256_of_hex"
external file_fast: string -> t = "stub_sha256_file"
external equal: t -> t -> bool = "stub_sha256_equal"

(* Read granularity for channel/file hashing. *)
let blksize = 4096

(* Raise [Invalid_argument "substring"] unless [ofs]/[len] denote a
   valid substring of [s].
   BUGFIX: the previous guard was [len <= 0 && String.length s < ofs + len],
   which almost never fired (both conjuncts are rarely true together),
   so out-of-range offsets reached the unchecked C stub. *)
let check_substring s ofs len =
  if ofs < 0 || len < 0 || ofs > String.length s - len then
    invalid_arg "substring"

(* Feed [len] bytes of [s] starting at [ofs] into [ctx], with bounds
   checking. *)
let update_substring ctx s ofs len =
  check_substring s ofs len;
  unsafe_update_substring ctx s ofs len

(* Feed the whole of [s] into [ctx]. *)
let update_string ctx s = unsafe_update_substring ctx s 0 (String.length s)

(* Digest of a full string. *)
let string s =
  let ctx = init () in
  unsafe_update_substring ctx s 0 (String.length s);
  finalize ctx

(* Digest of the empty string. *)
let zero = string ""

(* Digest of the substring [s.[ofs .. ofs+len-1]], with bounds checking
   (same BUGFIX as [update_substring]). *)
let substring s ofs len =
  check_substring s ofs len;
  let ctx = init () in
  unsafe_update_substring ctx s ofs len;
  finalize ctx

(* Digest of a bigarray buffer. *)
let buffer buf =
  let ctx = init () in
  update_buffer ctx buf;
  finalize ctx

(* Digest [len] bytes read from [chan]; [len = -1] means "until EOF".
   Raises [End_of_file] if the channel ends before [len] bytes were
   read. *)
let channel chan len =
  let ctx = init () and buf = Bytes.create blksize in
  let left = ref len and eof = ref false in
  (* [=] instead of the previous physical [==]: equivalent for ints and
     idiomatic. *)
  while (!left = -1 || !left > 0) && not !eof do
    let chunk = if !left < 0 then blksize else min !left blksize in
    let nread = Stdlib.input chan buf 0 chunk in
    if nread = 0 then eof := true
    else begin
      let s = Bytes.unsafe_to_string buf in
      unsafe_update_substring ctx s 0 nread;
      (* [unsafe_update_substring] does not hold on to [s], so the
         underlying bytes may be reused on the next iteration *)
      if !left <> -1 then left := !left - nread
    end
  done;
  if !left > 0 && !eof then raise End_of_file;
  finalize ctx

(* Digest a whole file. BUGFIX: the channel is now closed even when
   reading raises (the original leaked it on exception). *)
let file name =
  let chan = open_in_bin name in
  Fun.protect
    ~finally:(fun () -> close_in_noerr chan)
    (fun () -> channel chan (-1))

(* Digest the remainder of [chan]. *)
let input chan = channel chan (-1)

(* Write the hexadecimal form of [digest] to [chan]. *)
let output chan digest = output_string chan (to_hex digest)
(* * Copyright (C) 2006-2009 Vincent Hanquez <tab@snarc.org> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * SHA256 OCaml binding *)
escaping.ml
(* Expect tests whose expectations end with an "(escaped)" marker.
   NOTE(review): judging only by the expectations below, the marker
   appears to make the runner compare against an escaped rendering of
   the captured output (e.g. a space printed as [\032], a tab as [\t]) —
   confirm against the expect-test runner's documentation. *)
open Core

(* Output containing no characters that need escaping: the escaped form
   equals the raw text. *)
let%expect_test _ =
  print_string "alas, poor Yorick";
  [%expect {| alas, poor Yorick (escaped) |}]
;;

(* A space is rendered as its decimal escape [\032]. *)
let%expect_test _ =
  print_string "hello world";
  [%expect {| hello\032world (escaped) |}]
;;

(* A tab is rendered as [\t]. *)
let%expect_test _ =
  print_string "hello\tworld";
  [%expect {| hello\tworld (escaped) |}]
;;

(* Disabled test for the "(regexp)" expectation mode; kept for
   reference. *)
(*let%expect_test _ =
  print_string "hello\tworld";
  [%expect {| hello\tworld (regexp) |}]
;;
*)
slot_repr.mli
(** Slot index representation *)

(** {1 Abstract type} *)

(** A slot index is in essence a bounded whole number. That is, it is
    not allowed to overflow [max_value], nor does it wrap when calling
    [succ max_value]. In this case it returns an [Invalid_slot]
    error. *)
type t

type slot = t

val encoding : t Data_encoding.t

(** {1 Constructors} *)

val zero : t

(** Upper bound on the value a slot index can take *)
val max_value : t

(** [of_int i] creates a slot index from integer [i].

    @return [Error (Invalid_slot i)] if [i < 0 || i > max_value], and
    [Ok slot] otherwise *)
val of_int : int -> t tzresult

(** [of_int_do_not_use_except_for_parameters i] is an unchecked
    construction function. It may be used in cases where one knows
    [0 <= i <= max_value], e.g., when creating protocol parameters.
    When in doubt, use [of_int] or [of_int_exn]. *)
val of_int_do_not_use_except_for_parameters : int -> t

(** {1 Operator and pretty-printer} *)

(** [succ n] either returns an [Invalid_slot] error if [n] is
    [max_value] or [Ok value] otherwise. *)
val succ : t -> t tzresult

(** {1 Conversion/Printing} *)

(** [to_int slot] returns the integral representation of a slot index.
    This value is always a whole number. *)
val to_int : t -> int

val pp : Format.formatter -> t -> unit

(** {1 Submodules} *)

module Map : Map.S with type key = t

module Set : Set.S with type elt = t

include Compare.S with type t := t

(** {2 Slot ranges} *)

module Range : sig
  (** An ordered range of slots, in increasing order. *)
  type t

  (** {3 Constructor} *)

  (** [create ~min ~count] creates a full slot range starting at [min],
      of size [count], i.e., [min, min + count - 1].

      [create] errors if
      - [min < 0]
      - [count < 1]
      - [min + count - 1 > max_value] *)
  val create : min:int -> count:int -> t tzresult

  (** {3 Iterators} *)

  (** [fold f acc range] folds [f] over the values of [range], in
      increasing order. *)
  val fold : ('a -> slot -> 'a) -> 'a -> t -> 'a

  (** [fold_es f acc range] folds [f] over the values of [range], in
      increasing order. *)
  val fold_es :
    ('a -> slot -> 'a tzresult Lwt.t) -> 'a -> t -> 'a tzresult Lwt.t

  (** [rev_fold_es f acc range] folds [f] over the values of [range], in
      decreasing order. *)
  val rev_fold_es :
    ('a -> slot -> 'a tzresult Lwt.t) -> 'a -> t -> 'a tzresult Lwt.t
end
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2021 Nomadic Labs <contact@nomadic-labs.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
parsing_hacks_cpp.mli
(* Token-level disambiguation passes run over C++ token streams before
   parsing. Each pass takes the (already built) token view and mutates
   the tokens in place, hence the [unit] return types.
   NOTE(review): the per-function descriptions below are inferred from
   the names only — confirm against the implementation. *)

(* Presumably disambiguates '<' / '>' used as template brackets from
   comparison operators. *)
val find_template_inf_sup : Token_views_cpp.token_extended list -> unit

(* Presumably turns template-related token runs into comment tokens so
   the parser can skip them; note it works on the grouped view. *)
val find_template_commentize : Token_views_cpp.multi_grouped list -> unit

(* Presumably commentizes qualifier tokens (e.g. 'Foo::'). *)
val find_qualifier_commentize : Token_views_cpp.token_extended list -> unit

(* Presumably marks constructor definitions appearing outside a class
   body. *)
val find_constructor_outside_class :
  Token_views_cpp.token_extended list -> unit

(* Presumably marks constructor declarations/definitions. *)
val find_constructor : Token_views_cpp.token_extended list -> unit

(* Presumably detects declarations of the form 'Foo x(args);' (object
   construction) and related ambiguous patterns. *)
val find_constructed_object_and_more :
  Token_views_cpp.token_extended list -> unit

(* Presumably re-tags tokens preceding identifiers or typedef names;
   works on the grouped view. *)
val reclassify_tokens_before_idents_or_typedefs :
  Token_views_cpp.multi_grouped list -> unit
hello_world.ml
(* OCaml binding to the C primitive registered under the name
   "hello_world". Calling [hello_world ()] transfers control to the C
   implementation; no value is returned. *)
external hello_world : unit -> unit = "hello_world"
mockup_simulator.ml
(** A block as stored by a fake "node" in the simulation. *)
type block = {
  rpc_context : Environment_context.rpc_context;
  protocol_data : Protocol.Alpha_context.Block_header.protocol_data;
  raw_protocol_data : Bytes.t;
  operations : Mockup.M.Block_services.operation list list;
}

(** A chain is the list of blocks, head first. *)
type chain = block list

(** As new blocks and operations are received they are pushed to an Lwt_pipe
    wrapped into this type. *)
type broadcast =
  | Broadcast_block of Block_hash.t * Block_header.t * Operation.t list list
  | Broadcast_op of Operation_hash.t * Alpha_context.packed_operation

(** The state of a mockup node. *)
type state = {
  instance_index : int;
      (** Index of this node. Indices go from 0 to N-1 where N is the total
          number of bakers in the simulation. *)
  live_depth : int;
      (** How many blocks (counting from the head into the past) are
          considered live? *)
  mutable chain : chain;  (** The chain as seen by this fake "node". *)
  mutable mempool : (Operation_hash.t * Mockup.M.Protocol.operation) list;
      (** Mempool of this fake "node". *)
  chain_table : chain Block_hash.Table.t;
      (** The chain table of this fake "node". It maps from block hashes to
          blocks. *)
  global_chain_table : block Block_hash.Table.t;
      (** The global chain table that allows us to look up blocks that may be
          missing in [chain_table], i.e. not known to this particular node.
          This is used to find unknown predecessors. The real node can ask
          about an unknown block and receive it on request, this is supposed
          to emulate that functionality. *)
  ctxt_table : Environment_context.rpc_context Context_hash.Table.t;
      (** The context table allows us to look up rpc_context by its hash. *)
  heads_pipe : (Block_hash.t * Block_header.t) Lwt_pipe.Unbounded.t;
      (** [heads_pipe] is used to implement the [monitor_heads] RPC. *)
  operations_pipe :
    (Operation_hash.t * Mockup.M.Protocol.operation) option
    Lwt_pipe.Unbounded.t;
      (** [operations_pipe] is used to implement the [operations_pipe] RPC. *)
  mutable streaming_operations : bool;
      (** A helper flag used to implement the monitor operations RPC. *)
  broadcast_pipes : broadcast Lwt_pipe.Unbounded.t list;
      (** Broadcast pipes per node. *)
  genesis_block_true_hash : Block_hash.t;
      (** True hash of the genesis block as calculated by the
          [Block_header.hash] function. *)
}

let accounts = Mockup.Protocol_parameters.default_value.bootstrap_accounts

let chain_id = Chain_id.of_string_exn "main"

let genesis_block_hash =
  Block_hash.of_b58check_exn
    "BLockGenesisGenesisGenesisGenesisGenesisCCCCCeZiLHU"

let genesis_predecessor_block_hash = Block_hash.zero

(** How a message is propagated to one node: [Block] drops it, [Pass]
    forwards it immediately, [Delay s] forwards it after [s] seconds. *)
type propagation = Block | Pass | Delay of float

type propagation_vector = propagation list

(** User-supplied callbacks that observe and may rewrite the simulated
    network traffic. *)
module type Hooks = sig
  val on_inject_block :
    level:int32 ->
    round:int32 ->
    block_hash:Block_hash.t ->
    block_header:Block_header.t ->
    operations:Operation.t list list ->
    protocol_data:Alpha_context.Block_header.protocol_data ->
    (Block_hash.t * Block_header.t * Operation.t list list * propagation_vector)
    tzresult
    Lwt.t

  val on_inject_operation :
    op_hash:Operation_hash.t ->
    op:Alpha_context.packed_operation ->
    (Operation_hash.t * Alpha_context.packed_operation * propagation_vector)
    tzresult
    Lwt.t

  val on_new_head :
    block_hash:Block_hash.t ->
    block_header:Block_header.t ->
    (Block_hash.t * Block_header.t) option Lwt.t

  val on_new_operation :
    Operation_hash.t * Alpha_context.packed_operation ->
    (Operation_hash.t * Alpha_context.packed_operation) option Lwt.t

  val check_block_before_processing :
    level:int32 ->
    round:int32 ->
    block_hash:Block_hash.t ->
    block_header:Block_header.t ->
    protocol_data:Alpha_context.Block_header.protocol_data ->
    unit tzresult Lwt.t

  val check_chain_after_processing :
    level:int32 -> round:int32 -> chain:chain -> unit tzresult Lwt.t

  val check_mempool_after_processing :
    mempool:(Operation_hash.t * Mockup.M.Protocol.operation) list ->
    unit tzresult Lwt.t

  val stop_on_event : Baking_state.event -> bool

  val on_start_baker :
    baker_position:int ->
    delegates:Baking_state.delegate list ->
    cctxt:Protocol_client_context.full ->
    unit Lwt.t

  val check_chain_on_success : chain:chain -> unit tzresult Lwt.t
end

(** Return a series of blocks starting from the block with the given
    identifier. *)
let locate_blocks (state : state)
    (block : Tezos_shell_services.Block_services.block) :
    block list tzresult Lwt.t =
  match block with
  | `Hash (hash, rel) -> (
      match Block_hash.Table.find state.chain_table hash with
      | None ->
          failwith "locate_blocks: can't find the block %a" Block_hash.pp hash
      | Some chain0 ->
          let (_, chain) = List.split_n rel chain0 in
          return chain)
  | `Head rel ->
      let (_, chain) = List.split_n rel state.chain in
      return chain
  | `Level _ -> failwith "locate_blocks: `Level block spec not handled"
  (* Fixed typo in the error message: "net handled" -> "not handled". *)
  | `Genesis -> failwith "locate_blocks: `Genesis block spec not handled"
  | `Alias _ -> failwith "locate_blocks: `Alias block spec not handled"

(** Similar to [locate_blocks], but only returns the first block. *)
let locate_block (state : state)
    (block : Tezos_shell_services.Block_services.block) : block tzresult Lwt.t
    =
  locate_blocks state block >>=? function
  | [] -> failwith "locate_block: can't find the block"
  | x :: _ -> return x

(** Return the collection of live blocks for a given block identifier. *)
let live_blocks (state : state) block =
  locate_blocks state block >>=? fun chain ->
  let (segment, _) = List.split_n state.live_depth chain in
  return
    (List.fold_left
       (fun set ({rpc_context; _} : block) ->
         let hash = rpc_context.Environment_context.block_hash in
         Block_hash.Set.add hash set)
       (Block_hash.Set.singleton state.genesis_block_true_hash)
       segment)

(** Extract the round number from raw fitness. *)
let round_from_raw_fitness raw_fitness =
  match Protocol.Alpha_context.Fitness.from_raw raw_fitness with
  | Ok fitness ->
      return
        (Alpha_context.Round.to_int32
           (Protocol.Alpha_context.Fitness.round fitness))
  | Error _ -> failwith "round_from_raw_fitness: cannot parse fitness"

(** Extract level from a block header. *)
let get_block_level (block_header : Block_header.t) =
  return block_header.shell.level

(** Extract round from a block header.
    *)
let get_block_round (block_header : Block_header.t) =
  round_from_raw_fitness block_header.shell.fitness

(** Parse protocol data. *)
let parse_protocol_data (protocol_data : Bytes.t) =
  match
    Data_encoding.Binary.of_bytes_opt
      Protocol.Alpha_context.Block_header.protocol_data_encoding
      protocol_data
  with
  | None -> failwith "can't parse protocol data of a block"
  | Some parsed_protocol_data -> return parsed_protocol_data

(** Broadcast an operation or block according to the given propagation
    vector. [Block] drops the message for that node, [Pass] pushes it
    immediately, [Delay s] pushes it after sleeping [s] seconds (in the
    background, errors ignored). *)
let handle_propagation msg propagation_vector broadcast_pipes =
  List.iter_s
    (fun (propagation, pipe) ->
      match propagation with
      | Block -> Lwt.return () (* drop: nothing is pushed to this pipe *)
      | Pass ->
          Lwt_pipe.Unbounded.push pipe msg ;
          Lwt.return_unit
      | Delay s ->
          (* Push asynchronously after the delay; exceptions are swallowed. *)
          Lwt.dont_wait
            (fun () ->
              Lwt_unix.sleep s >>= fun () ->
              Lwt_pipe.Unbounded.push pipe msg ;
              Lwt.return_unit)
            (fun _exn -> ()) ;
          Lwt.return ())
    (* If the vector is shorter than the pipe list (or vice versa), the
       extra elements are dropped by [combine_drop]. *)
    (List.combine_drop propagation_vector broadcast_pipes)
  >>= fun () -> return ()

(** Use the [user_hooks] to produce a module of functions that will perform
    the heavy lifting for the RPC implementations.
    *)
let make_mocked_services_hooks (state : state) (user_hooks : (module Hooks)) :
    Faked_services.hooks =
  let module User_hooks = (val user_hooks : Hooks) in
  let module Impl : Faked_services.Mocked_services_hooks = struct
    type mempool = Mockup.M.Block_services.Mempool.t

    (* Stream new heads from [state.heads_pipe], filtered through the
       [on_new_head] user hook (a [None] answer skips the head). *)
    let monitor_heads () =
      let next () =
        let rec pop_until_ok () =
          Lwt_pipe.Unbounded.pop state.heads_pipe
          >>= fun (block_hash, block_header) ->
          User_hooks.on_new_head ~block_hash ~block_header >>= function
          | None -> pop_until_ok ()
          | Some head -> Lwt.return_some head
        in
        pop_until_ok ()
      in
      let shutdown () = () in
      RPC_answer.{next; shutdown}

    (* Report the current head once (first call), then end the stream. *)
    let monitor_bootstrapped () =
      let first_run = ref true in
      let next () =
        if !first_run then (
          first_run := false ;
          let b = match state.chain with [] -> assert false | b :: _ -> b in
          let head_hash = b.rpc_context.block_hash in
          let timestamp = b.rpc_context.block_header.timestamp in
          Lwt.return_some (head_hash, timestamp))
        else Lwt.return_none
      in
      let shutdown () = () in
      RPC_answer.{next; shutdown}

    let protocols (block : Tezos_shell_services.Block_services.block) =
      locate_block state block >>=? fun x ->
      let hash = x.rpc_context.block_hash in
      let is_predecessor_of_genesis =
        match block with
        | `Hash (requested_hash, rel) ->
            Int.equal rel 0
            && Block_hash.equal requested_hash genesis_predecessor_block_hash
        | _ -> false
      in
      (* It is important to tell the baker that the genesis block is not in
         the alpha protocol (we use Protocol_hash.zero). This will make the
         baker not try to propose alternatives to that block and just accept
         it as final in that Protocol_hash.zero protocol. The same for
         predecessor of genesis, it should be in Protocol_hash.zero. *)
      return
        Tezos_shell_services.Block_services.
          {
            current_protocol =
              (if
                 Block_hash.equal hash genesis_block_hash
                 || is_predecessor_of_genesis
               then Protocol_hash.zero
               else Protocol.hash);
            next_protocol =
              (if is_predecessor_of_genesis then Protocol_hash.zero
               else Protocol.hash);
          }

    let header (block : Tezos_shell_services.Block_services.block) :
        Mockup.M.Block_services.block_header tzresult Lwt.t =
      locate_block state block >>=? fun x ->
      return
        {
          Mockup.M.Block_services.hash = x.rpc_context.block_hash;
          chain_id;
          shell = x.rpc_context.block_header;
          protocol_data = x.protocol_data;
        }

    let operations block =
      locate_block state block >>=? fun x -> return x.operations

    (* Run the [on_inject_block] hook (which may rewrite the block and
       choose the propagation vector), then broadcast. *)
    let inject_block block_hash (block_header : Block_header.t) operations =
      parse_protocol_data block_header.protocol_data >>=? fun protocol_data ->
      get_block_level block_header >>=? fun level ->
      get_block_round block_header >>=? fun round ->
      User_hooks.on_inject_block
        ~level
        ~round
        ~block_hash
        ~block_header
        ~operations
        ~protocol_data
      >>=? fun (block_hash1, block_header1, operations1, propagation_vector) ->
      handle_propagation
        (Broadcast_block (block_hash1, block_header1, operations1))
        propagation_vector
        state.broadcast_pipes

    (* [None] selects all broadcast pipes; [Some l] selects pipes by index. *)
    let all_pipes_or_select = function
      | None -> return state.broadcast_pipes
      | Some l ->
          List.map_es
            (fun n ->
              match List.nth_opt state.broadcast_pipes n with
              | None ->
                  failwith
                    "Node number %d is out of range (max is %d)"
                    n
                    (List.length state.broadcast_pipes - 1)
              | Some pipe -> return pipe)
            l

    let broadcast_block ?dests block_hash (block_header : Block_header.t)
        operations =
      all_pipes_or_select dests >>=? fun pipes ->
      List.iter_s
        (fun pipe ->
          Lwt_pipe.Unbounded.push
            pipe
            (Broadcast_block (block_hash, block_header, operations)) ;
          Lwt.return ())
        pipes
      >>= return

    let inject_operation (Operation.{shell; proto} as op) =
      let op_hash = Operation.hash op in
      let proto_op_opt =
        Data_encoding.Binary.of_bytes Protocol.operation_data_encoding proto
      in
      match proto_op_opt with
      | Error _ -> failwith "inject_operation: cannot parse operation"
      | Ok protocol_data ->
          let op : Protocol.Alpha_context.packed_operation =
            {shell; protocol_data}
          in
          User_hooks.on_inject_operation ~op_hash ~op
          >>=? fun (op_hash1, op1, propagation_vector) ->
          handle_propagation
            (Broadcast_op (op_hash1, op1))
            propagation_vector
            state.broadcast_pipes
          >>=? fun () -> return op_hash1

    let broadcast_operation ?dests
        (op : Protocol.Alpha_context.packed_operation) =
      all_pipes_or_select dests >>=? fun pipes ->
      let op_hash = Alpha_context.Operation.hash_packed op in
      List.iter_s
        (fun pipe ->
          Lwt_pipe.Unbounded.push pipe (Broadcast_op (op_hash, op)) ;
          Lwt.return ())
        pipes
      >>= return

    (* Everything in the mempool is reported as "applied". *)
    let pending_operations () =
      let ops = state.mempool in
      Lwt.return
        Mockup.M.Block_services.Mempool.
          {
            applied = ops;
            refused = Operation_hash.Map.empty;
            outdated = Operation_hash.Map.empty;
            branch_refused = Operation_hash.Map.empty;
            branch_delayed = Operation_hash.Map.empty;
            unprocessed = Operation_hash.Map.empty;
          }

    (* The filter flags are ignored; operations come straight from
       [state.operations_pipe]. A [None] in the pipe marks a head change:
       the first one after something was streamed yields an empty batch,
       a second one ends the stream. *)
    let monitor_operations ~applied ~branch_delayed ~branch_refused ~refused =
      ignore applied ;
      ignore branch_delayed ;
      ignore branch_refused ;
      ignore refused ;
      let streamed = ref false in
      state.streaming_operations <- true ;
      let next () =
        let rec pop_until_ok () =
          Lwt_pipe.Unbounded.pop state.operations_pipe >>= function
          | None when !streamed -> Lwt.return None
          | None ->
              streamed := true ;
              Lwt.return (Some [])
          | Some op -> (
              User_hooks.on_new_operation op >>= function
              | None when !streamed -> pop_until_ok ()
              | None ->
                  streamed := true ;
                  Lwt.return (Some [])
              | Some (oph, op) ->
                  streamed := true ;
                  Lwt.return (Some [((oph, op), None)]))
        in
        pop_until_ok ()
      in
      let shutdown () = () in
      RPC_answer.{next; shutdown}

    let rpc_context_callback block =
      locate_block state block >>=? fun x -> return x.rpc_context

    (* For each requested head, return (up to [length]) block hashes sorted
       by fitness. [min_date] is ignored. *)
    let list_blocks ~heads ~length ~min_date:_ =
      let compare_block_fitnesses block0 block1 =
        Fitness.compare
          block0.rpc_context.block_header.fitness
          block1.rpc_context.block_header.fitness
      in
      let hash_of_block block = block.rpc_context.block_hash in
      let lookup_head head =
        locate_blocks state (`Hash (head, 0)) >>=? fun xs ->
        let segment =
          match length with None -> xs | Some n -> List.take_n n xs
        in
        return
          (List.map hash_of_block (List.sort compare_block_fitnesses segment))
      in
      List.map_es lookup_head heads

    (* Delegates to the top-level [live_blocks] with this node's state. *)
    let live_blocks block = live_blocks state block

    let raw_protocol_data block =
      locate_block state block >>=? fun x -> return x.raw_protocol_data
  end in
  (module Impl)

(** Return the current head. *)
let head {chain; _} =
  match List.hd chain with
  | None -> failwith "mockup_simulator.ml: empty chain"
  | Some hd -> return hd

(** Clear from the mempool operations whose branch does not point to a live
    block with respect to the current head.
*) let clear_mempool state = head state >>=? fun head -> let included_ops_hashes = List.map (fun (op : Mockup.M.Block_services.operation) -> op.hash) (List.flatten head.operations) in live_blocks state (`Head 0) >>=? fun live_set -> let mempool = List.filter (fun (_oph, (op : Mockup.M.Protocol.operation)) -> let included_in_head = List.mem ~equal:Operation_hash.equal (Alpha_context.Operation.hash_packed op) included_ops_hashes in Block_hash.Set.mem op.shell.branch live_set && not included_in_head) state.mempool in state.mempool <- mempool ; return_unit (** Apply a block to the given [rpc_context]. *) let reconstruct_context (rpc_context : Tezos_protocol_environment.rpc_context) (operations : Operation.t list list) (block_header : Block_header.t) = let header = rpc_context.block_header in let predecessor_context = rpc_context.context in parse_protocol_data block_header.protocol_data >>=? fun protocol_data -> Mockup.M.Protocol.begin_application ~chain_id ~predecessor_context ~predecessor_timestamp:header.timestamp ~predecessor_fitness:header.fitness ~cache:`Lazy {shell = block_header.shell; protocol_data} >>=? fun validation_state -> let i = ref 0 in List.fold_left_es (List.fold_left_es (fun (validation_state, results) op -> incr i ; let operation_data = Data_encoding.Binary.of_bytes_exn Mockup.M.Protocol.operation_data_encoding op.Operation.proto in let op = {Mockup.M.Protocol.shell = op.shell; protocol_data = operation_data} in Mockup.M.Protocol.apply_operation validation_state op >>=? fun (validation_state, receipt) -> return (validation_state, receipt :: results))) (validation_state, []) operations >>=? fun (validation_state, _) -> Mockup.M.Protocol.finalize_block validation_state None (** Process an incoming block. If validation succeeds: - update the current head to this new block - cleanup outdated operations - cleanup listener table Note that this implementation does not handle concurrent branches. 
    *)
let rec process_block state block_hash (block_header : Block_header.t)
    operations =
  (* Find (and, if necessary, recursively apply) the predecessor block. *)
  let get_predecessor () =
    let predecessor_hash = block_header.Block_header.shell.predecessor in
    head state >>=? fun head ->
    match Block_hash.Table.find state.chain_table predecessor_hash with
    | None | Some [] -> (
        (* Even if the predecessor is not known locally, it might be known by
           some node in the network. The code below "requests" information
           about the block by its hash. *)
        match
          Block_hash.Table.find state.global_chain_table predecessor_hash
        with
        | None -> failwith "get_predecessor: unknown predecessor block"
        | Some predecessor ->
            let predecessor_block_header =
              Block_header.
                {
                  shell = predecessor.rpc_context.block_header;
                  protocol_data = predecessor.raw_protocol_data;
                }
            in
            let predecessor_ops =
              List.map
                (fun xs ->
                  List.map
                    (fun (op : Mockup.M.Block_services.operation) ->
                      Operation.
                        {
                          shell = op.shell;
                          proto =
                            Data_encoding.Binary.to_bytes_exn
                              Protocol.operation_data_encoding
                              op.protocol_data;
                        })
                    xs)
                predecessor.operations
            in
            (* If the block is found, apply it before proceeding. *)
            process_block
              state
              predecessor.rpc_context.block_hash
              predecessor_block_header
              predecessor_ops
            >>=? fun () -> return predecessor)
    | Some (predecessor :: _) ->
        (* Reject predecessors more than 2 levels behind the current head. *)
        if
          Int32.sub
            head.rpc_context.block_header.level
            predecessor.rpc_context.block_header.level
          <= 2l
        then return predecessor
        else failwith "get_predecessor: the predecessor block is too old"
  in
  match Block_hash.Table.find state.chain_table block_hash with
  | Some _ ->
      (* The block is already known. *)
      return_unit
  | None ->
      get_predecessor () >>=? fun predecessor ->
      head state >>=? fun head ->
      reconstruct_context predecessor.rpc_context operations block_header
      >>=? fun ({context; _}, _) ->
      let rpc_context =
        Tezos_protocol_environment.
          {context; block_hash; block_header = block_header.shell}
      in
      (* Convert raw operations to the block-services representation. *)
      let operations =
        List.map
          (fun pass ->
            List.map
              (fun (Operation.{shell; proto} as op) ->
                let hash : Operation_hash.t = Operation.hash op in
                let protocol_data : Alpha_context.packed_protocol_data =
                  Data_encoding.Binary.of_bytes_exn
                    Protocol.operation_data_encoding
                    proto
                in
                {
                  Mockup.M.Block_services.chain_id;
                  hash;
                  shell;
                  protocol_data;
                  receipt = Empty;
                })
              pass)
          operations
      in
      parse_protocol_data block_header.protocol_data >>=? fun protocol_data ->
      let new_block =
        {
          rpc_context;
          protocol_data;
          raw_protocol_data = block_header.protocol_data;
          operations;
        }
      in
      let predecessor_hash = block_header.Block_header.shell.predecessor in
      let tail =
        Block_hash.Table.find state.chain_table predecessor_hash
        |> WithExceptions.Option.get ~loc:__LOC__
      in
      let new_chain = new_block :: tail in
      Block_hash.Table.replace state.chain_table block_hash new_chain ;
      Block_hash.Table.replace state.global_chain_table block_hash new_block ;
      Context_hash.Table.replace
        state.ctxt_table
        rpc_context.Environment_context.block_header.context
        rpc_context ;
      (* Adopt the new chain only if it improves on the head's fitness. *)
      if
        Fitness.(
          block_header.shell.fitness > head.rpc_context.block_header.fitness)
      then (
        state.chain <- new_chain ;
        clear_mempool state >>=? fun () ->
        (* The head has changed, the messages in the operations pipe are no
           good anymore. *)
        ignore (Lwt_pipe.Unbounded.pop_all_now state.operations_pipe) ;
        (* Signal the head change to any active operations stream. *)
        (if state.streaming_operations then (
           state.streaming_operations <- false ;
           Lwt_pipe.Unbounded.push state.operations_pipe None ;
           Lwt.return ())
         else Lwt.return ())
        >>= fun () ->
        (* Put back in the pipe operations that are still alive. *)
        List.iter_s
          (fun op ->
            Lwt_pipe.Unbounded.push state.operations_pipe (Some op) ;
            Lwt.return ())
          state.mempool
        >>= fun () -> return_unit)
      else return_unit

(** This process listens to broadcast block and operations and incorporates
    them in the context of the fake node.
    *)
let rec listener ~(user_hooks : (module Hooks)) ~state ~broadcast_pipe =
  let module User_hooks = (val user_hooks : Hooks) in
  Lwt_pipe.Unbounded.pop broadcast_pipe >>= function
  | Broadcast_op (operation_hash, packed_operation) ->
      (* Record the operation in the mempool and forward it to the
         operations stream, then let the user hook inspect the mempool. *)
      state.mempool <- (operation_hash, packed_operation) :: state.mempool ;
      Lwt_pipe.Unbounded.push
        state.operations_pipe
        (Some (operation_hash, packed_operation)) ;
      User_hooks.check_mempool_after_processing ~mempool:state.mempool
      >>=? fun () -> listener ~user_hooks ~state ~broadcast_pipe
  | Broadcast_block (block_hash, block_header, operations) ->
      get_block_level block_header >>=? fun level ->
      get_block_round block_header >>=? fun round ->
      parse_protocol_data block_header.protocol_data >>=? fun protocol_data ->
      User_hooks.check_block_before_processing
        ~level
        ~round
        ~block_hash
        ~block_header
        ~protocol_data
      >>=? fun () ->
      process_block state block_hash block_header operations >>=? fun () ->
      User_hooks.check_chain_after_processing ~level ~round ~chain:state.chain
      >>=? fun () ->
      Lwt_pipe.Unbounded.push state.heads_pipe (block_hash, block_header) ;
      listener ~user_hooks ~state ~broadcast_pipe

(** Create a fake node state. *)
let create_fake_node_state ~i ~live_depth
    ~(genesis_block : Block_header.t * Environment_context.rpc_context)
    ~global_chain_table ~broadcast_pipes =
  let (block_header0, rpc_context0) = genesis_block in
  parse_protocol_data block_header0.protocol_data >>=? fun protocol_data ->
  let genesis0 =
    {
      rpc_context = rpc_context0;
      protocol_data;
      raw_protocol_data = block_header0.protocol_data;
      (* Four empty validation passes. *)
      operations = [[]; []; []; []];
    }
  in
  let chain0 = [genesis0] in
  let heads_pipe = Lwt_pipe.Unbounded.create () in
  let operations_pipe = Lwt_pipe.Unbounded.create () in
  let genesis_block_true_hash =
    Block_header.hash
      {
        shell = rpc_context0.block_header;
        protocol_data = block_header0.protocol_data;
      }
  in
  (* Seed the heads stream with the genesis block. *)
  Lwt_pipe.Unbounded.push heads_pipe (rpc_context0.block_hash, block_header0) ;
  return
    {
      instance_index = i;
      live_depth;
      mempool = [];
      chain = chain0;
      (* The genesis chain is reachable under all three genesis hashes. *)
      chain_table =
        Block_hash.Table.of_seq
          (List.to_seq
             [
               (rpc_context0.block_hash, chain0);
               (genesis_block_true_hash, chain0);
               (genesis_predecessor_block_hash, chain0);
             ]);
      global_chain_table;
      ctxt_table =
        Context_hash.Table.of_seq
          (List.to_seq
             [
               ( rpc_context0.Environment_context.block_header
                   .Block_header.context,
                 rpc_context0 );
             ]);
      heads_pipe;
      operations_pipe;
      streaming_operations = false;
      broadcast_pipes;
      genesis_block_true_hash;
    }

(** Start baker process. *)
let baker_process ~(delegates : Baking_state.delegate list) ~base_dir
    ~(genesis_block : Block_header.t * Environment_context.rpc_context) ~i
    ~global_chain_table ~broadcast_pipes ~(user_hooks : (module Hooks)) =
  let broadcast_pipe =
    List.nth broadcast_pipes i |> WithExceptions.Option.get ~loc:__LOC__
  in
  create_fake_node_state
    ~i
    ~live_depth:60
    ~genesis_block
    ~global_chain_table
    ~broadcast_pipes
  >>=? fun state ->
  (* In-memory wallet and client context backed by the mocked services. *)
  let filesystem = String.Hashtbl.create 10 in
  let wallet =
    new Faked_client_context.faked_io_wallet ~base_dir ~filesystem
  in
  let cctxt =
    let hooks = make_mocked_services_hooks state user_hooks in
    new Protocol_client_context.wrap_full
      (new Faked_client_context.unix_faked
         ~base_dir
         ~filesystem
         ~chain_id
         ~hooks)
  in
  let module User_hooks = (val user_hooks : Hooks) in
  User_hooks.on_start_baker ~baker_position:i ~delegates ~cctxt >>= fun () ->
  (* Register every delegate's keys in the fake wallet. *)
  List.iter_es
    (fun ({alias; public_key; public_key_hash; secret_key_uri} :
           Baking_state.delegate) ->
      let open Tezos_client_base in
      let name = alias |> WithExceptions.Option.get ~loc:__LOC__ in
      Client_keys.neuterize secret_key_uri >>=? fun public_key_uri ->
      Client_keys.register_key
        wallet
        ~force:false
        (public_key_hash, public_key_uri, secret_key_uri)
        ~public_key
        name)
    delegates
  >>=? fun () ->
  let context_index =
    let open Abstract_context_index in
    {
      checkout_fun =
        (fun hash ->
          Context_hash.Table.find state.ctxt_table hash
          |> Option.map (fun Environment_context.{context; _} -> context)
          |> Lwt.return);
      finalize_fun = Lwt.return;
    }
  in
  let module User_hooks = (val user_hooks : Hooks) in
  let listener_process () = listener ~user_hooks ~state ~broadcast_pipe in
  let stop_on_event event = User_hooks.stop_on_event event in
  let baker_process () =
    Faked_daemon.Baker.run
      ~cctxt
      ~stop_on_event
      ~chain_id
      ~context_index
      ~delegates
  in
  (* Run listener and baker concurrently; the first to finish wins. *)
  Lwt.pick [listener_process (); baker_process ()] >>=? fun () ->
  User_hooks.check_chain_on_success ~chain:state.chain

(** Build and sign the protocol data of a genesis block. *)
let genesis_protocol_data (baker_sk : Signature.secret_key)
    (predecessor_block_hash : Block_hash.t)
    (block_header : Block_header.shell_header) : Bytes.t =
  let proof_of_work_nonce =
    Bytes.create Protocol.Alpha_context.Constants.proof_of_work_nonce_size
  in
  let operation_list_hash = Operation_list_hash.compute [] in
  let payload_hash =
    Protocol.Alpha_context.Block_payload.hash
      ~predecessor:predecessor_block_hash
      Alpha_context.Round.zero
      operation_list_hash
  in
  let contents =
    Protocol.Alpha_context.Block_header.
      {
        payload_hash;
        payload_round = Alpha_context.Round.zero;
        proof_of_work_nonce;
        seed_nonce_hash = None;
        liquidity_baking_escape_vote =
          Baking_configuration.default_liquidity_baking_escape_vote;
      }
  in
  let unsigned_header =
    Data_encoding.Binary.to_bytes_exn
      Protocol.Alpha_context.Block_header.unsigned_encoding
      (block_header, contents)
  in
  let signature =
    Signature.sign
      ~watermark:
        Alpha_context.Block_header.(to_watermark (Block_header chain_id))
      baker_sk
      unsigned_header
  in
  Data_encoding.Binary.to_bytes_exn
    Protocol.Alpha_context.Block_header.protocol_data_encoding
    {contents; signature}

(** Figure out who should be the signer for the genesis block. *)
let deduce_baker_sk
    (accounts_with_secrets :
      (Protocol.Alpha_context.Parameters.bootstrap_account
      * Tezos_mockup_commands.Mockup_wallet.bootstrap_secret)
      list) (total_accounts : int) (level : int) :
    Signature.secret_key tzresult Lwt.t =
  (match (total_accounts, level) with
  | (_, 0) -> return 0 (* apparently this doesn't really matter *)
  | _ ->
      failwith
        "cannot deduce baker for a genesis block, total accounts = %d, level = \
         %d"
        total_accounts
        level)
  >>=? fun baker_index ->
  let (_, secret) =
    List.nth accounts_with_secrets baker_index
    |> WithExceptions.Option.get ~loc:__LOC__
  in
  let secret_key =
    Signature.Secret_key.of_b58check_exn (Uri.path (secret.sk_uri :> Uri.t))
  in
  return secret_key

(** Generate the two initial genesis blocks.
*) let make_genesis_context ~delegate_selection ~round0 ~round1 ~consensus_committee_size ~consensus_threshold accounts_with_secrets (total_accounts : int) = let default_constants = Mockup.Protocol_parameters.default_value.constants in let round_durations = let open Alpha_context in Stdlib.Option.get (Round.Durations.create_opt ~first_round_duration:(Period.of_seconds_exn round0) ~delay_increment_per_round: (Period.of_seconds_exn (Int64.sub round1 round0))) in let constants = { default_constants with delegate_selection; consensus_committee_size; consensus_threshold; minimal_block_delay = Alpha_context.Period.of_seconds_exn (max 1L round0); delay_increment_per_round = Alpha_context.Period.of_seconds_exn Int64.(max 1L (sub round1 round0)); } in let common_parameters = Mockup.Protocol_parameters.{default_value with constants} in let make_block0 initial_timestamp = let parameters = {common_parameters with initial_timestamp} in let reencoded_parameters = Data_encoding.Binary.of_bytes_exn Mockup.M.parameters_encoding @@ Data_encoding.Binary.to_bytes_exn Mockup.Protocol_parameters.encoding parameters in let from_bootstrap_account i ( (account : Protocol.Alpha_context.Parameters.bootstrap_account), (secret : Tezos_mockup_commands.Mockup_wallet.bootstrap_secret) ) : Mockup.Parsed_account.t = { name = Format.sprintf "bootstrap%d" (i + 1); sk_uri = secret.sk_uri; amount = account.amount; } in let bootstrap_accounts = Data_encoding.Json.construct (Data_encoding.list Mockup.Parsed_account.encoding) (List.mapi from_bootstrap_account accounts_with_secrets) in Mockup.M.init ~cctxt:Faked_client_context.logger ~parameters:reencoded_parameters ~constants_overrides_json:None ~bootstrap_accounts_json:(Some bootstrap_accounts) >>=? 
fun {chain = _; rpc_context = rpc_context0; protocol_data = _} -> let block_header0 = { rpc_context0.block_header with predecessor = genesis_predecessor_block_hash; } in let rpc_context = {rpc_context0 with block_header = block_header0} in deduce_baker_sk accounts_with_secrets total_accounts 0 >>=? fun baker_sk -> let protocol_data = genesis_protocol_data baker_sk genesis_predecessor_block_hash rpc_context.block_header in let block_header = Block_header.{shell = rpc_context.block_header; protocol_data} in return (block_header, rpc_context) in let level0_round0_duration = Protocol.Alpha_context.Round.round_duration round_durations Alpha_context.Round.zero in let timestamp0 = Time.Protocol.of_seconds Int64.( sub (of_float (Unix.time ())) (Alpha_context.Period.to_seconds level0_round0_duration)) in make_block0 timestamp0 (** By default, propagate every message everywhere. *) let default_propagation_vector = List.repeat 5 Pass module Default_hooks : Hooks = struct let on_inject_block ~level:_ ~round:_ ~block_hash ~block_header ~operations ~protocol_data:_ = return (block_hash, block_header, operations, default_propagation_vector) let on_inject_operation ~op_hash ~op = return (op_hash, op, default_propagation_vector) let on_new_head ~block_hash ~block_header = Lwt.return (Some (block_hash, block_header)) let on_new_operation x = Lwt.return_some x let check_block_before_processing ~level:_ ~round:_ ~block_hash:_ ~block_header:_ ~protocol_data:_ = return_unit let check_chain_after_processing ~level:_ ~round:_ ~chain:_ = return_unit let check_mempool_after_processing ~mempool:_ = return_unit let stop_on_event _ = false let on_start_baker ~baker_position:_ ~delegates:_ ~cctxt:_ = Lwt.return_unit let check_chain_on_success ~chain:_ = return_unit end type config = { debug : bool; round0 : int64; round1 : int64; timeout : int; delegate_selection : Alpha_context.Constants.delegate_selection; consensus_committee_size : int; consensus_threshold : int; } let default_config = { 
debug = false; round0 = 2L; (* Rounds should be long enough for the bakers to exchange all the necessary messages. *) round1 = 3L (* No real need to increase round durations. *); timeout = 30; delegate_selection = Random; consensus_committee_size = Default_parameters.constants_mainnet.consensus_committee_size; consensus_threshold = Default_parameters.constants_mainnet.consensus_threshold; } let make_baking_delegate ( (account : Alpha_context.Parameters.bootstrap_account), (secret : Tezos_mockup_commands.Mockup_wallet.bootstrap_secret) ) : Baking_state.delegate = Baking_state. { alias = Some secret.name; public_key = account.public_key |> WithExceptions.Option.get ~loc:__LOC__; public_key_hash = account.public_key_hash; secret_key_uri = secret.sk_uri; } let run ?(config = default_config) bakers_spec = Tezos_client_base.Client_keys.register_signer (module Tezos_signer_backends.Unencrypted) ; let total_accounts = List.fold_left (fun acc (n, _) -> acc + n) 0 bakers_spec in if total_accounts = 0 then failwith "the simulation should use at least one delegate" else if total_accounts > 5 then failwith "only up to 5 bootstrap accounts are available" else (* When logging is enabled it may cause non-termination: https://gitlab.com/nomadic-labs/tezos/-/issues/546 In particular, it seems that when logging is enabled the baker process can get cancelled without executing its Lwt finalizer. *) (if config.debug then Tezos_base_unix.Internal_event_unix.init () else Lwt.return_unit) >>= fun () -> let total_bakers = List.length bakers_spec in (List.init ~when_negative_length:() total_bakers (fun _ -> Lwt_pipe.Unbounded.create ()) |> function | Error () -> failwith "impossible: negative length of the baker spec" | Ok xs -> return xs) >>=? fun broadcast_pipes -> let global_chain_table = Block_hash.Table.create 10 in Tezos_mockup_commands.Mockup_wallet.default_bootstrap_accounts >>=? 
fun bootstrap_secrets -> let accounts_with_secrets = List.combine_drop (List.take_n total_accounts accounts) bootstrap_secrets in let all_delegates = List.map make_baking_delegate accounts_with_secrets in make_genesis_context ~delegate_selection:config.delegate_selection ~round0:config.round0 ~round1:config.round1 ~consensus_committee_size:config.consensus_committee_size ~consensus_threshold:config.consensus_threshold accounts_with_secrets total_accounts >>=? fun genesis_block -> let take_third (_, _, x) = x in let timeout_process () = Lwt_unix.sleep (Float.of_int config.timeout) >>= fun () -> failwith "the test is taking longer than %d seconds@." config.timeout in Lwt.pick [ timeout_process (); Lwt_result_syntax.tzjoin (take_third (List.fold_left (fun (i, delegates_acc, ms) (n, user_hooks) -> let (delegates, leftover_delegates) = List.split_n n delegates_acc in let m = baker_process ~delegates ~base_dir:"dummy" ~genesis_block ~i ~global_chain_table ~broadcast_pipes ~user_hooks in (i + 1, leftover_delegates, m :: ms)) (0, all_delegates, []) bakers_spec)); ] let get_account_pk i = match List.nth accounts i with | None -> assert false | Some acc -> acc.public_key |> WithExceptions.Option.get ~loc:__LOC__ let bootstrap1 = get_account_pk 0 let bootstrap2 = get_account_pk 1 let bootstrap3 = get_account_pk 2 let bootstrap4 = get_account_pk 3 let bootstrap5 = get_account_pk 4 let check_block_signature ~block_hash ~(block_header : Block_header.t) ~public_key = let (protocol_data : Protocol.Alpha_context.Block_header.protocol_data) = Data_encoding.Binary.of_bytes_exn Protocol.Alpha_context.Block_header.protocol_data_encoding block_header.protocol_data in let unsigned_header = Data_encoding.Binary.to_bytes_exn Protocol.Alpha_context.Block_header.unsigned_encoding (block_header.shell, protocol_data.contents) in if Signature.check ~watermark: Alpha_context.Block_header.(to_watermark (Block_header chain_id)) public_key protocol_data.signature unsigned_header then return_unit 
else failwith "unexpected signature for %a; tried with %a@." Block_hash.pp block_hash Signature.Public_key.pp public_key

(* A mempool predicate: given an operation's hash and the packed operation,
   decide (inside the tzresult Lwt monad) whether the operation matches. *)
type op_predicate = Operation_hash.t -> Alpha_context.packed_operation -> bool tzresult Lwt.t

(* Count how many operations in [mempool] satisfy [predicate]. *)
let mempool_count_ops ~mempool ~predicate = List.map_es (fun (op_hash, op) -> predicate op_hash op) mempool >>=? fun results -> return (List.fold_left (fun acc result -> if result then acc + 1 else acc) 0 results)

(* [true] iff at least one operation in [mempool] satisfies [predicate]. *)
let mempool_has_op ~mempool ~predicate = mempool_count_ops ~mempool ~predicate >>=? fun n -> return (n > 0)

(* Like [mempool_has_op], but records a positive answer by setting [var];
   [var] is left untouched when no operation matches. *)
let mempool_has_op_ref ~mempool ~predicate ~var = mempool_has_op ~mempool ~predicate >>=? fun result -> if result then var := true ; return_unit

(* Check whether [op] is signed by [public_key].  The watermark is chosen
   from the operation's contents: (pre)endorsements use their dedicated
   watermark (tied to [chain_id]), anything else the generic one.  Fails on
   operations with more than one content and on unsigned operations. *)
let op_is_signed_by ~public_key (op_hash : Operation_hash.t) (op : Alpha_context.packed_operation) = match op.protocol_data with | Operation_data d -> ( (match d.contents with | Single op_contents -> return (match op_contents with | Endorsement _ -> Alpha_context.Operation.to_watermark (Endorsement chain_id) | Preendorsement _ -> Alpha_context.Operation.to_watermark (Preendorsement chain_id) | _ -> Signature.Generic_operation) | _ -> failwith "unexpected contents in %a@." Operation_hash.pp op_hash) >>=? fun watermark -> match d.signature with | None -> failwith "did not find a signature for op %a@."
Operation_hash.pp op_hash | Some signature -> let unsigned_operation_bytes = Data_encoding.Binary.to_bytes_exn Protocol.Alpha_context.Operation.unsigned_encoding (op.shell, Contents_list d.contents) in return (Signature.check ~watermark public_key signature unsigned_operation_bytes))

(* [true] iff [op] is a single preendorsement; when [?level] (resp. [?round])
   is given it must also match the preendorsement's level (resp. round).
   Fails on operations carrying more than one content. *)
let op_is_preendorsement ?level ?round (op_hash : Operation_hash.t) (op : Alpha_context.packed_operation) = match op.protocol_data with | Operation_data d -> ( match d.contents with | Single op_contents -> ( match op_contents with | Preendorsement consensus_content -> let right_level = match level with | None -> true | Some expected_level -> Int32.equal (Alpha_context.Raw_level.to_int32 consensus_content.level) expected_level in let right_round = match round with | None -> true | Some expected_round -> Int32.equal (Alpha_context.Round.to_int32 consensus_content.round) expected_round in return (right_level && right_round) | _ -> return false) | _ -> failwith "unexpected contents in %a@." Operation_hash.pp op_hash)

(* Same as [op_is_preendorsement], but for endorsements. *)
let op_is_endorsement ?level ?round (op_hash : Operation_hash.t) (op : Alpha_context.packed_operation) = match op.protocol_data with | Operation_data d -> ( match d.contents with | Single op_contents -> ( match op_contents with | Endorsement consensus_content -> let right_level = match level with | None -> true | Some expected_level -> Int32.equal (Alpha_context.Raw_level.to_int32 consensus_content.level) expected_level in let right_round = match round with | None -> true | Some expected_round -> Int32.equal (Alpha_context.Round.to_int32 consensus_content.round) expected_round in return (right_level && right_round) | _ -> return false) | _ -> failwith "unexpected contents in %a@." Operation_hash.pp op_hash)

(* Short-circuiting conjunction of two [op_predicate]s:
   [g] is only evaluated when [f] holds. *)
let op_is_both f g op_hash op = f op_hash op >>=?
fun f_result -> if f_result then g op_hash op else return false

(* Remember the (payload_hash, payload_round) pair of a proposed block in
   [var], so a later hook can compare against it. *)
let save_proposal_payload ~(protocol_data : Alpha_context.Block_header.protocol_data) ~var = var := Some (protocol_data.contents.payload_hash, protocol_data.contents.payload_round) ; return_unit

(* Check that [protocol_data] carries the same payload hash and payload round
   as the proposal previously recorded in [original_proposal] (typically by
   [save_proposal_payload]); fail with [message] on mismatch, and fail
   outright if no proposal was recorded yet. *)
let verify_payload_hash ~(protocol_data : Alpha_context.Block_header.protocol_data) ~original_proposal ~message = match !original_proposal with | None -> failwith "verify_payload_hash: expected to have observed a proposal by now" | Some (original_hash, original_round) -> if Protocol.Block_payload_hash.equal original_hash protocol_data.contents.payload_hash && Protocol.Alpha_context.Round.equal original_round protocol_data.contents.payload_round then return_unit else failwith "verify_payload_hash: %s" message

(* Extract the round of [block] from the fitness in its header. *)
let get_block_round block = round_from_raw_fitness block.rpc_context.block_header.fitness
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2021 Nomadic Labs, <contact@nomadic-labs.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
dune
; This file was automatically generated, do not edit. ; Edit file manifest/main.ml instead. (library (name tezos_benchmark_016_PtMumbai) (public_name tezos-benchmark-016-PtMumbai) (libraries tezos-stdlib tezos-base tezos-error-monad tezos-micheline tezos-micheline-rewriting tezos-benchmark tezos-benchmark-type-inference-016-PtMumbai tezos-protocol-016-PtMumbai tezos-crypto tezos-protocol-016-PtMumbai.parameters hashcons tezos-016-PtMumbai-test-helpers prbnmcn-stats) (library_flags (:standard -linkall)) (flags (:standard) -open Tezos_stdlib -open Tezos_base -open Tezos_base.TzPervasives.Error_monad.Legacy_monad_globals -open Tezos_error_monad -open Tezos_micheline -open Tezos_micheline_rewriting -open Tezos_benchmark -open Tezos_benchmark_type_inference_016_PtMumbai -open Tezos_protocol_016_PtMumbai -open Tezos_016_PtMumbai_test_helpers) (private_modules kernel rules state_space))
nopres_impl.ml
(* Minimal contract a flat-array implementation must provide for the [Make]
   functor: an element type, a container type, and unsafe (unchecked)
   element access and blitting. *)
module type Implementation = sig type el type t val name : string val length : t -> int val create : int -> t val make : int -> el -> t val unsafe_get : t -> int -> el val unsafe_set : t -> int -> el -> unit val unsafe_blit : t -> int -> t -> int -> int -> unit end

(* Resizable-array functor: wraps a flat array [Impl.t] and a reallocation
   strategy [S].  Invariant: [ar] is the backing store (its real length may
   exceed the logical length), [vlix] is the index of the last logical
   element (-1 when empty), so the logical length is always [vlix + 1]. *)
module Make (S : Strat.T) (Impl : Implementation) = struct module Strategy = S type strategy = Strategy.t type el = Impl.el type t = { mutable ar : Impl.t; mutable vlix : int; mutable strategy : strategy }

  let name = Impl.name

  (* Shadow the stdlib versions so error messages are prefixed with the
     implementation name, e.g. "Array.get". *)
  let invalid_arg str = invalid_arg (name ^ "." ^ str) let failwith str = failwith (name ^ "." ^ str)

  (* Logical length / last index. *)
  let length ra = ra.vlix + 1 let lix ra = ra.vlix

  (* Physical (allocated) length / last index of the backing store. *)
  let real_length ra = Impl.length ra.ar let real_lix ra = real_length ra - 1

  let unsafe_get ra ix = Impl.unsafe_get ra.ar ix let unsafe_set ra ix el = Impl.unsafe_set ra.ar ix el

  (* Bounds-checked accessors (checked against the logical length). *)
  let get ra n = if n > ra.vlix || n < 0 then invalid_arg "get" else unsafe_get ra n let set ra n el = if n > ra.vlix || n < 0 then invalid_arg "set" else unsafe_set ra n el

  let creator = Impl.create let empty_ar = Impl.create 0

  (* Constructors: the backing store is sized by [Strategy.grow], which may
     allocate more than [n] elements to amortize future growth. *)
  let screate strategy n = let res = { ar = empty_ar; vlix = n - 1; strategy = strategy } in res.ar <- creator (Strategy.grow strategy n); res let smake strategy n x = let res = { ar = empty_ar; vlix = n - 1; strategy = strategy } in res.ar <- Impl.make (Strategy.grow strategy n) x; res let create_fresh n = screate Strategy.default n let create_from ra = { ar = creator (length ra); vlix = ra.vlix; strategy = ra.strategy } let sempty strategy = let res = { ar = empty_ar; vlix = -1; strategy = strategy } in res.ar <- creator (Strategy.grow strategy 0); res let empty () = sempty Strategy.default let create = screate Strategy.default let make = smake Strategy.default

  (* NOTE(review): [sinit] evaluates [f 0] via [smake] even before the loop;
     for n = 0 this still calls [f 0] — presumably accepted by the library. *)
  let sinit strategy n f = let res = smake strategy n (f 0) in let ar = res.ar in for i = 1 to n - 1 do Impl.unsafe_set ar i (f i) done; res let init n f = sinit Strategy.default n f let get_strategy ra = ra.strategy

  (* Reallocate the backing store to [len] elements, copying logical
     elements 0..some_lix from the old store. *)
  let resizer some_lix ({ ar = ar} as ra) len = let new_ar = creator
len in for i = 0 to some_lix do Impl.unsafe_set new_ar i (Impl.unsafe_get ar i) done; ra.ar <- new_ar let enforce_strategy ra = let real_len = real_length ra in let new_len = length ra in let new_real_len = Strategy.shrink ra.strategy ~real_len ~new_len in if new_real_len <> -1 then resizer ra.vlix ra new_real_len let set_strategy ra strategy = ra.strategy <- strategy; enforce_strategy ra let put_strategy ra strategy = ra.strategy <- strategy let unsafe_blit_on_other ra1 ofs1 ra2 = Impl.unsafe_blit ra1.ar ofs1 ra2.ar let copy ra = let len = length ra in let ar = Impl.create len in Impl.unsafe_blit ra.ar 0 ar 0 len; { ra with ar = ar } let append ra1 ra2 = match ra1.vlix, ra2.vlix with | -1, -1 -> empty () | _, -1 -> copy ra1 | -1, _ -> copy ra2 | _ -> let len1 = length ra1 in let len2 = length ra2 in let res = create_fresh (len1 + len2) in unsafe_blit_on_other ra1 0 res 0 len1; unsafe_blit_on_other ra2 0 res len1 len2; res let rec concat_aux res offset = function | [] -> res | h::t -> if h.vlix < 0 then concat_aux res offset t else let len = length h in unsafe_blit_on_other h 0 res offset len; concat_aux res (offset + len) t let concat l = let len = List.fold_left (fun a el -> a + length el) 0 l in if len = 0 then empty () else concat_aux (create_fresh len) 0 l let unsafe_sub ra ofs len = let res = create_fresh len in unsafe_blit_on_other ra ofs res 0 len; res let sub ra ofs len = if ofs < 0 || len < 0 || ofs + len > length ra then invalid_arg "sub" else unsafe_sub ra ofs len let guarantee_ix ra ix = if real_lix ra < ix then resizer ra.vlix ra (Strategy.grow ra.strategy (ix + 1)) let maybe_grow_ix ra new_lix = guarantee_ix ra new_lix; ra.vlix <- new_lix let add_one ra x = let n = length ra in maybe_grow_ix ra n; unsafe_set ra n x let unsafe_remove_one ra = ra.vlix <- ra.vlix - 1; enforce_strategy ra let remove_one ra = if ra.vlix < 0 then failwith "remove_one" else unsafe_remove_one ra let unsafe_remove_n ra n = ra.vlix <- ra.vlix - n; enforce_strategy ra let 
remove_n ra n = if n > length ra || n < 0 then invalid_arg "remove_n" else unsafe_remove_n ra n let unsafe_remove_range ra ofs len = let ofs_len = ofs + len in unsafe_blit_on_other ra ofs_len ra ofs (length ra - ofs_len); unsafe_remove_n ra len let remove_range ra ofs len = if ofs < 0 || len < 0 || ofs + len > length ra then invalid_arg "remove_range" else unsafe_remove_range ra ofs len let clear ra = ra.vlix <- -1; enforce_strategy ra let unsafe_swap { ar = ar } n m = let tmp = Impl.unsafe_get ar n in Impl.unsafe_set ar n (Impl.unsafe_get ar m); Impl.unsafe_set ar m tmp let swap ra n m = if n > ra.vlix || m > ra.vlix || n < 0 || m < 0 then invalid_arg "swap" else unsafe_swap ra n m let unsafe_swap_in_last ({ ar = ar } as ra) n = Impl.unsafe_set ar n (Impl.unsafe_get ar ra.vlix); unsafe_remove_one ra let swap_in_last ra n = if n > ra.vlix || n < 0 then invalid_arg "swap_in_last" else unsafe_swap_in_last ra n let unsafe_fill ({ ar = ar } as ra) ofs len x = let last = ofs + len - 1 in maybe_grow_ix ra (max last ra.vlix); for i = ofs to last do Impl.unsafe_set ar i x done let fill ra ofs len x = if ofs < 0 || len < 0 || ofs > length ra then invalid_arg "fill" else unsafe_fill ra ofs len x let unsafe_blit ra1 ofs1 ra2 ofs2 len = guarantee_ix ra2 (ofs2 + len - 1); unsafe_blit_on_other ra1 ofs1 ra2 ofs2 len let blit ra1 ofs1 ra2 ofs2 len = if len < 0 || ofs1 < 0 || ofs2 < 0 || ofs1 + len > length ra1 || ofs2 > length ra2 then invalid_arg "blit" else unsafe_blit ra1 ofs1 ra2 ofs2 len let rec to_list_aux ar i accu = if i < 0 then accu else to_list_aux ar (i - 1) (Impl.unsafe_get ar i :: accu) let to_list ra = to_list_aux ra.ar ra.vlix [] let rec of_list_aux ar i = function | [] -> () | h::t -> Impl.unsafe_set ar i h; of_list_aux ar (i + 1) t let of_list l = let ra = create_fresh (List.length l) in of_list_aux ra.ar 0 l; ra let sof_list strategy l = let ra = screate strategy (List.length l) in of_list_aux ra.ar 0 l; ra let to_array ({ ar = ar } as ra) = Array.init (length 
ra) (fun i -> Impl.unsafe_get ar i) let sof_array strategy ar = sinit strategy (Array.length ar) (fun i -> Array.unsafe_get ar i) let of_array ar = sof_array Strategy.default ar let iter f ({ ar = ar } as ra) = for i = 0 to ra.vlix do f (Impl.unsafe_get ar i) done let map f ({ ar = ar } as ra) = let res = create_from ra in let res_ar = res.ar in for i = 0 to res.vlix do Impl.unsafe_set res_ar i (f (Impl.unsafe_get ar i)) done; res let iteri f ({ ar = ar } as ra) = for i = 0 to ra.vlix do f i (Impl.unsafe_get ar i) done let mapi f ({ ar = ar } as ra) = let { ar = res_ar } as res = create_from ra in for i = 0 to res.vlix do Impl.unsafe_set res_ar i (f i (Impl.unsafe_get ar i)) done; res let fold_left f accu ({ ar = ar } as ra) = let res = ref accu in for i = 0 to ra.vlix do res := f !res (Impl.unsafe_get ar i) done; !res let fold_right f ({ ar = ar } as ra) accu = let res = ref accu in for i = ra.vlix downto 0 do res := f (Impl.unsafe_get ar i) !res done; !res let rec for_all_aux i p ra = i > ra.vlix || p (unsafe_get ra i) && for_all_aux (i + 1) p ra let for_all p ra = for_all_aux 0 p ra let rec exists_aux i p ra = i <= ra.vlix && (p (unsafe_get ra i) || exists_aux (i + 1) p ra) let exists p ra = exists_aux 0 p ra let rec mem_aux i x ra = i <= ra.vlix && (unsafe_get ra i = x || mem_aux (i + 1) x ra) let mem x ra = mem_aux 0 x ra let rec memq_aux i x ra = i <= ra.vlix && (unsafe_get ra i == x || memq_aux (i + 1) x ra) let memq x ra = memq_aux 0 x ra let rec pos_aux i x ra = if i > ra.vlix then None else if unsafe_get ra i = x then Some i else pos_aux (i + 1) x ra let pos x ra = pos_aux 0 x ra let rec posq_aux i x ra = if i > ra.vlix then None else if unsafe_get ra i == x then Some i else posq_aux (i + 1) x ra let posq x ra = posq_aux 0 x ra let rec find_aux i p ra = if i > ra.vlix then raise Not_found else let el = unsafe_get ra i in if p el then el else find_aux (i + 1) p ra let find p ra = find_aux 0 p ra let rec find_index_aux p ra i = if i > ra.vlix then raise 
Not_found else if p (unsafe_get ra i) then i else find_index_aux p ra (i + 1)

(* Index of the first element >= position [i] satisfying [p];
   raises [Not_found] when none exists, [Invalid_argument] when [i < 0]. *)
  let find_index p ra i = if i < 0 then invalid_arg "find_index" else find_index_aux p ra i

  (* Fresh resizable array (same strategy) of the elements satisfying [p],
     in order. *)
  let filter p ({ ar = ar } as ra) = let res = sempty ra.strategy in for i = 0 to ra.vlix do let el = Impl.unsafe_get ar i in if p el then add_one res el done; res let find_all = filter

  (* In-place filter: compact the kept elements to the front with a classic
     read/write two-cursor sweep, then shrink by the number removed. *)
  let filter_in_place p ({ ar = ar } as ra) = let dest = ref 0 in let pos = ref 0 in while !pos <= ra.vlix do let el = Impl.unsafe_get ar !pos in if p el then begin Impl.unsafe_set ar !dest el; incr dest end; incr pos done; unsafe_remove_n ra (!pos - !dest)

  (* Split into (elements satisfying [p], elements not satisfying [p]),
     both preserving the original order and strategy. *)
  let partition p ra = let res1, res2 as res = sempty ra.strategy, sempty ra.strategy in for i = 0 to ra.vlix do let el = unsafe_get ra i in if p el then add_one res1 el else add_one res2 el done; res end
(* RES - Automatically Resizing Contiguous Memory for OCaml Copyright (C) 1999- Markus Mottl email: markus.mottl@gmail.com WWW: http://www.ocaml.info This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA *)
fmpz_mat.h
#ifndef FMPZ_MAT_H #define FMPZ_MAT_H #ifdef FMPZ_MAT_INLINES_C #define FMPZ_MAT_INLINE FLINT_DLL #else #define FMPZ_MAT_INLINE static __inline__ #endif #undef ulong #define ulong ulongxx /* interferes with system includes */ #include <stdio.h> #undef ulong #include <gmp.h> #define ulong mp_limb_t #include "flint.h" #include "fmpz.h" #include "fmpz_vec.h" #include "nmod_mat.h" #include "d_mat.h" #include "mpf_mat.h" #ifdef __cplusplus extern "C" { #endif typedef struct { fmpz * entries; slong r; slong c; fmpz ** rows; } fmpz_mat_struct; typedef fmpz_mat_struct fmpz_mat_t[1]; /* Element access ********************************************************/ FMPZ_MAT_INLINE fmpz * fmpz_mat_entry(const fmpz_mat_t mat, slong i, slong j) { return mat->rows[i] + j; } FMPZ_MAT_INLINE slong fmpz_mat_nrows(const fmpz_mat_t mat) { return mat->r; } FMPZ_MAT_INLINE slong fmpz_mat_ncols(const fmpz_mat_t mat) { return mat->c; } /* Memory management ********************************************************/ FLINT_DLL void fmpz_mat_init(fmpz_mat_t mat, slong rows, slong cols); FLINT_DLL void fmpz_mat_init_set(fmpz_mat_t mat, const fmpz_mat_t src); FLINT_DLL void fmpz_mat_swap(fmpz_mat_t mat1, fmpz_mat_t mat2); FMPZ_MAT_INLINE void fmpz_mat_swap_entrywise(fmpz_mat_t mat1, fmpz_mat_t mat2) { slong i, j; for (i = 0; i < fmpz_mat_nrows(mat1); i++) for (j = 0; j < fmpz_mat_ncols(mat1); j++) fmpz_swap(fmpz_mat_entry(mat2, i, j), fmpz_mat_entry(mat1, i, j)); } FLINT_DLL void fmpz_mat_set(fmpz_mat_t mat1, const fmpz_mat_t mat2); FLINT_DLL void fmpz_mat_clear(fmpz_mat_t mat); FLINT_DLL int fmpz_mat_equal(const fmpz_mat_t mat1, const fmpz_mat_t mat2); FLINT_DLL int fmpz_mat_is_zero(const fmpz_mat_t mat); FLINT_DLL int fmpz_mat_is_one(const fmpz_mat_t mat); FMPZ_MAT_INLINE int fmpz_mat_is_zero_row(const fmpz_mat_t mat, slong i) { return _fmpz_vec_is_zero(mat->rows[i], mat->c); } FMPZ_MAT_INLINE int fmpz_mat_col_equal(fmpz_mat_t M, slong m, slong n) { slong i; for (i = 0; i < M->r; i++) { if 
(!fmpz_equal(M->rows[i] + m, M->rows[i] + n)) return 0; } return 1; } FMPZ_MAT_INLINE int fmpz_mat_row_equal(fmpz_mat_t M, slong m, slong n) { slong i; for (i = 0; i < M->c; i++) { if (!fmpz_equal(M->rows[m] + i, M->rows[n] + i)) return 0; } return 1; } FMPZ_MAT_INLINE int fmpz_mat_is_empty(const fmpz_mat_t mat) { return (mat->r == 0) || (mat->c == 0); } FMPZ_MAT_INLINE int fmpz_mat_is_square(const fmpz_mat_t mat) { return (mat->r == mat->c); } FLINT_DLL void fmpz_mat_zero(fmpz_mat_t mat); FLINT_DLL void fmpz_mat_one(fmpz_mat_t mat); /* Windows and concatenation */ FLINT_DLL void fmpz_mat_window_init(fmpz_mat_t window, const fmpz_mat_t mat, slong r1, slong c1, slong r2, slong c2); FLINT_DLL void fmpz_mat_window_clear(fmpz_mat_t window); FLINT_DLL void fmpz_mat_concat_horizontal(fmpz_mat_t res, const fmpz_mat_t mat1, const fmpz_mat_t mat2); FLINT_DLL void fmpz_mat_concat_vertical(fmpz_mat_t res, const fmpz_mat_t mat1, const fmpz_mat_t mat2); /* Input and output *********************************************************/ FLINT_DLL int fmpz_mat_fprint(FILE * file, const fmpz_mat_t mat); FLINT_DLL int fmpz_mat_fprint_pretty(FILE * file, const fmpz_mat_t mat); FMPZ_MAT_INLINE int fmpz_mat_print(const fmpz_mat_t mat) { return fmpz_mat_fprint(stdout, mat); } FMPZ_MAT_INLINE int fmpz_mat_print_pretty(const fmpz_mat_t mat) { return fmpz_mat_fprint_pretty(stdout, mat); } FLINT_DLL int fmpz_mat_fread(FILE* file, fmpz_mat_t mat); FMPZ_MAT_INLINE int fmpz_mat_read(fmpz_mat_t mat) { return fmpz_mat_fread(stdin, mat); } /* Random matrix generation *************************************************/ FLINT_DLL void fmpz_mat_randbits(fmpz_mat_t mat, flint_rand_t state, flint_bitcnt_t bits); FLINT_DLL void fmpz_mat_randtest(fmpz_mat_t mat, flint_rand_t state, flint_bitcnt_t bits); FLINT_DLL void fmpz_mat_randtest_unsigned(fmpz_mat_t mat, flint_rand_t state, flint_bitcnt_t bits); FLINT_DLL void fmpz_mat_randintrel(fmpz_mat_t mat, flint_rand_t state, flint_bitcnt_t bits); FLINT_DLL void 
fmpz_mat_randsimdioph(fmpz_mat_t mat, flint_rand_t state, flint_bitcnt_t bits, flint_bitcnt_t bits2); FLINT_DLL void fmpz_mat_randntrulike(fmpz_mat_t mat, flint_rand_t state, flint_bitcnt_t bits, ulong q); FLINT_DLL void fmpz_mat_randntrulike2(fmpz_mat_t mat, flint_rand_t state, flint_bitcnt_t bits, ulong q); FLINT_DLL void fmpz_mat_randajtai(fmpz_mat_t mat, flint_rand_t state, double alpha); FLINT_DLL void fmpz_mat_randrank(fmpz_mat_t mat, flint_rand_t state, slong rank, flint_bitcnt_t bits); FLINT_DLL void fmpz_mat_randdet(fmpz_mat_t mat, flint_rand_t state, const fmpz_t det); FLINT_DLL void fmpz_mat_randops(fmpz_mat_t mat, flint_rand_t state, slong count); FLINT_DLL int fmpz_mat_randpermdiag(fmpz_mat_t mat, flint_rand_t state, const fmpz * diag, slong n); /* Norms */ FLINT_DLL slong fmpz_mat_max_bits(const fmpz_mat_t mat); /* Transpose */ FLINT_DLL void fmpz_mat_transpose(fmpz_mat_t B, const fmpz_mat_t A); /* Addition and subtraction */ FLINT_DLL void fmpz_mat_add(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL void fmpz_mat_sub(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL void fmpz_mat_neg(fmpz_mat_t B, const fmpz_mat_t A); /* Scalar operations */ FLINT_DLL void fmpz_mat_scalar_mul_fmpz(fmpz_mat_t B, const fmpz_mat_t A, const fmpz_t c); FLINT_DLL void fmpz_mat_scalar_mul_si(fmpz_mat_t B, const fmpz_mat_t A, slong c); FLINT_DLL void fmpz_mat_scalar_mul_ui(fmpz_mat_t B, const fmpz_mat_t A, ulong c); FLINT_DLL void fmpz_mat_scalar_addmul_fmpz(fmpz_mat_t B, const fmpz_mat_t A, const fmpz_t c); FLINT_DLL void fmpz_mat_scalar_addmul_si(fmpz_mat_t B, const fmpz_mat_t A, slong c); FLINT_DLL void fmpz_mat_scalar_addmul_ui(fmpz_mat_t B, const fmpz_mat_t A, ulong c); FLINT_DLL void fmpz_mat_scalar_submul_fmpz(fmpz_mat_t B, const fmpz_mat_t A, const fmpz_t c); FLINT_DLL void fmpz_mat_scalar_submul_si(fmpz_mat_t B, const fmpz_mat_t A, slong c); FLINT_DLL void fmpz_mat_scalar_submul_ui(fmpz_mat_t B, const fmpz_mat_t A, ulong c); FLINT_DLL 
void fmpz_mat_scalar_addmul_nmod_mat_fmpz(fmpz_mat_t B, const nmod_mat_t A, const fmpz_t c); FLINT_DLL void fmpz_mat_scalar_addmul_nmod_mat_ui(fmpz_mat_t B, const nmod_mat_t A, ulong c); FLINT_DLL void fmpz_mat_scalar_divexact_fmpz(fmpz_mat_t B, const fmpz_mat_t A, const fmpz_t c); FLINT_DLL void fmpz_mat_scalar_divexact_si(fmpz_mat_t B, const fmpz_mat_t A, slong c); FLINT_DLL void fmpz_mat_scalar_divexact_ui(fmpz_mat_t B, const fmpz_mat_t A, ulong c); FLINT_DLL void fmpz_mat_scalar_mul_2exp(fmpz_mat_t B, const fmpz_mat_t A, ulong exp); FLINT_DLL void fmpz_mat_scalar_tdiv_q_2exp(fmpz_mat_t B, const fmpz_mat_t A, ulong exp); FLINT_DLL void fmpz_mat_scalar_smod(fmpz_mat_t B, const fmpz_mat_t A, const fmpz_t P); FLINT_DLL void fmpz_mat_scalar_mod_fmpz(fmpz_mat_t B, const fmpz_mat_t A, const fmpz_t m); /* Multiplication */ FLINT_DLL void fmpz_mat_mul(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL void fmpz_mat_mul_classical(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL void fmpz_mat_mul_strassen(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL void fmpz_mat_mul_classical_inline(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL void _fmpz_mat_mul_fft(fmpz_mat_t C, const fmpz_mat_t A, slong abits, const fmpz_mat_t B, slong bbits, int sign); FLINT_DLL void fmpz_mat_mul_fft(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL void _fmpz_mat_mul_multi_mod(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B, int sign, flint_bitcnt_t Cbits); FLINT_DLL void fmpz_mat_mul_multi_mod(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL int _fmpz_mat_mul_blas(fmpz_mat_t C, const fmpz_mat_t A, flint_bitcnt_t Abits, const fmpz_mat_t B, flint_bitcnt_t Bbits, int sign, flint_bitcnt_t Cbits); FLINT_DLL int fmpz_mat_mul_blas(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL void _fmpz_mat_mul_small_1(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL void 
_fmpz_mat_mul_small_2a(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL void _fmpz_mat_mul_small_2b(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL void _fmpz_mat_mul_small_internal(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B, flint_bitcnt_t Cbits); FLINT_DLL void _fmpz_mat_mul_small(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL void _fmpz_mat_mul_double_word(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL void _fmpz_mat_mul_double_word_internal(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B, int sign, flint_bitcnt_t bits); FLINT_DLL void fmpz_mat_sqr_bodrato(fmpz_mat_t B, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_sqr(fmpz_mat_t B, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_pow(fmpz_mat_t B, const fmpz_mat_t A, ulong exp); FLINT_DLL void fmpz_mat_mul_fmpz_vec(fmpz * c, const fmpz_mat_t A, const fmpz * b, slong blen); FLINT_DLL void fmpz_mat_mul_fmpz_vec_ptr(fmpz * const * c, const fmpz_mat_t A, const fmpz * const * b, slong blen); FLINT_DLL void fmpz_mat_fmpz_vec_mul(fmpz * c, const fmpz * a, slong alen, const fmpz_mat_t B); FLINT_DLL void fmpz_mat_fmpz_vec_mul_ptr(fmpz * const * c, const fmpz * const * a, slong alen, const fmpz_mat_t B); /* Kronecker product */ FLINT_DLL void fmpz_mat_kronecker_product(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B); /* Content */ FLINT_DLL void fmpz_mat_content(fmpz_t ret, const fmpz_mat_t A); /* Permutations */ FMPZ_MAT_INLINE void fmpz_mat_swap_rows(fmpz_mat_t mat, slong * perm, slong r, slong s) { if (r != s && !fmpz_mat_is_empty(mat)) { fmpz * u; slong t; if (perm) { t = perm[s]; perm[s] = perm[r]; perm[r] = t; } u = mat->rows[s]; mat->rows[s] = mat->rows[r]; mat->rows[r] = u; } } FMPZ_MAT_INLINE void fmpz_mat_invert_rows(fmpz_mat_t mat, slong * perm) { slong i; for (i = 0; i < mat->r/2; i++) fmpz_mat_swap_rows(mat, perm, i, mat->r - i - 1); } FMPZ_MAT_INLINE void fmpz_mat_swap_cols(fmpz_mat_t mat, slong * perm, slong r, slong s) { 
if (r != s && !fmpz_mat_is_empty(mat)) { slong t; if (perm) { t = perm[s]; perm[s] = perm[r]; perm[r] = t; } for (t = 0; t < mat->r; t++) { fmpz_swap(fmpz_mat_entry(mat, t, r), fmpz_mat_entry(mat, t, s)); } } } FMPZ_MAT_INLINE void fmpz_mat_invert_cols(fmpz_mat_t mat, slong * perm) { if (!fmpz_mat_is_empty(mat)) { slong t; slong i; slong c = mat->c; slong k = mat->c/2; if (perm) { for (i =0; i < k; i++) { t = perm[i]; perm[i] = perm[c - i]; perm[c - i] = t; } } for (t = 0; t < mat->r; t++) { for (i = 0; i < k; i++) { fmpz_swap(fmpz_mat_entry(mat, t, i), fmpz_mat_entry(mat, t, c - i - 1)); } } } } /* Gaussian elimination *****************************************************/ FLINT_DLL slong fmpz_mat_find_pivot_any(const fmpz_mat_t mat, slong start_row, slong end_row, slong c); FLINT_DLL slong fmpz_mat_find_pivot_smallest(const fmpz_mat_t mat, slong start_row, slong end_row, slong c); FLINT_DLL slong fmpz_mat_fflu(fmpz_mat_t B, fmpz_t den, slong * perm, const fmpz_mat_t A, int rank_check); FLINT_DLL slong fmpz_mat_rank_small_inplace(fmpz_mat_t B); FLINT_DLL slong fmpz_mat_rref(fmpz_mat_t B, fmpz_t den, const fmpz_mat_t A); FLINT_DLL slong fmpz_mat_rref_fflu(fmpz_mat_t B, fmpz_t den, const fmpz_mat_t A); FLINT_DLL slong fmpz_mat_rref_mul(fmpz_mat_t B, fmpz_t den, const fmpz_mat_t A); FLINT_DLL int fmpz_mat_is_in_rref_with_rank(const fmpz_mat_t A, const fmpz_t den, slong rank); /* Modular gaussian elimination *********************************************/ FLINT_DLL slong fmpz_mat_rref_mod(slong * perm, fmpz_mat_t A, const fmpz_t p); /* Modular Howell and strong echelon form ***********************************/ FLINT_DLL slong fmpz_mat_howell_form_mod(fmpz_mat_t A, const fmpz_t mod); FLINT_DLL void fmpz_mat_strong_echelon_form_mod(fmpz_mat_t A, const fmpz_t mod); /* Trace ********************************************************************/ FLINT_DLL void fmpz_mat_trace(fmpz_t trace, const fmpz_mat_t mat); /* Determinant 
**************************************************************/ FLINT_DLL void fmpz_mat_det(fmpz_t det, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_det_cofactor(fmpz_t det, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_det_bareiss(fmpz_t det, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_det_modular(fmpz_t det, const fmpz_mat_t A, int proved); FLINT_DLL void fmpz_mat_det_modular_accelerated(fmpz_t det, const fmpz_mat_t A, int proved); FLINT_DLL void fmpz_mat_det_modular_given_divisor(fmpz_t det, const fmpz_mat_t A, const fmpz_t d, int proved); FLINT_DLL void fmpz_mat_det_bound(fmpz_t bound, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_det_bound_nonzero(fmpz_t bound, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_det_divisor(fmpz_t d, const fmpz_mat_t A); /* Transforms */ FLINT_DLL void fmpz_mat_similarity(fmpz_mat_t A, slong r, fmpz_t d); #include "fmpz_poly.h" /* Characteristic polynomial ************************************************/ FLINT_DLL void _fmpz_mat_charpoly_berkowitz(fmpz * rop, const fmpz_mat_t op); FLINT_DLL void fmpz_mat_charpoly_berkowitz(fmpz_poly_t cp, const fmpz_mat_t mat); FLINT_DLL void _fmpz_mat_charpoly_modular(fmpz * rop, const fmpz_mat_t op); FLINT_DLL void fmpz_mat_charpoly_modular(fmpz_poly_t cp, const fmpz_mat_t mat); FMPZ_MAT_INLINE void _fmpz_mat_charpoly(fmpz * cp, const fmpz_mat_t mat) { _fmpz_mat_charpoly_modular(cp, mat); } FMPZ_MAT_INLINE void fmpz_mat_charpoly(fmpz_poly_t cp, const fmpz_mat_t mat) { if (mat->r != mat->c) { flint_printf("Exception (nmod_mat_charpoly). 
Non-square matrix.\n"); flint_abort(); } fmpz_mat_charpoly_modular(cp, mat); } /* Minimal polynomial *******************************************************/ FLINT_DLL slong _fmpz_mat_minpoly_modular(fmpz * rop, const fmpz_mat_t op); FLINT_DLL void fmpz_mat_minpoly_modular(fmpz_poly_t cp, const fmpz_mat_t mat); FMPZ_MAT_INLINE slong _fmpz_mat_minpoly(fmpz * cp, const fmpz_mat_t mat) { return _fmpz_mat_minpoly_modular(cp, mat); } FMPZ_MAT_INLINE void fmpz_mat_minpoly(fmpz_poly_t cp, const fmpz_mat_t mat) { if (mat->r != mat->c) { flint_printf("Exception (fmpz_mat_minpoly). Non-square matrix.\n"); flint_abort(); } fmpz_mat_minpoly_modular(cp, mat); } /* Rank **********************************************************************/ FLINT_DLL slong fmpz_mat_rank(const fmpz_mat_t A); /* Nonsingular solving *******************************************************/ FLINT_DLL void fmpz_mat_solve_bound(fmpz_t N, fmpz_t D, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL int fmpz_mat_solve(fmpz_mat_t X, fmpz_t den, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL int fmpz_mat_solve_cramer(fmpz_mat_t X, fmpz_t den, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL int fmpz_mat_solve_fflu(fmpz_mat_t X, fmpz_t den, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL int fmpz_mat_solve_fflu_precomp(fmpz_mat_t X, const slong * perm, const fmpz_mat_t FFLU, const fmpz_mat_t B); FLINT_DLL mp_limb_t fmpz_mat_find_good_prime_and_invert(nmod_mat_t Ainv, const fmpz_mat_t A, const fmpz_t det_bound); FLINT_DLL mp_limb_t * fmpz_mat_dixon_get_crt_primes(slong * num_primes, const fmpz_mat_t A, mp_limb_t p); FLINT_DLL void _fmpz_mat_solve_dixon(fmpz_mat_t X, fmpz_t mod, const fmpz_mat_t A, const fmpz_mat_t B, const nmod_mat_t Ainv, mp_limb_t p, const fmpz_t N, const fmpz_t D); FLINT_DLL int fmpz_mat_solve_dixon(fmpz_mat_t X, fmpz_t mod, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL void _fmpz_mat_solve_dixon_den(fmpz_mat_t X, fmpz_t den, const fmpz_mat_t A, const fmpz_mat_t B, const
nmod_mat_t Ainv, mp_limb_t p, const fmpz_t N, const fmpz_t D); FLINT_DLL int fmpz_mat_solve_dixon_den(fmpz_mat_t X, fmpz_t den, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL int fmpz_mat_solve_multi_mod_den(fmpz_mat_t X, fmpz_t den, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL int fmpz_mat_can_solve_multi_mod_den(fmpz_mat_t X, fmpz_t den, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL int fmpz_mat_can_solve_fflu(fmpz_mat_t X, fmpz_t den, const fmpz_mat_t A, const fmpz_mat_t B); FLINT_DLL int fmpz_mat_can_solve(fmpz_mat_t X, fmpz_t den, const fmpz_mat_t A, const fmpz_mat_t B); /* Nullspace *****************************************************************/ FLINT_DLL slong fmpz_mat_nullspace(fmpz_mat_t res, const fmpz_mat_t mat); /* Inverse *******************************************************************/ FLINT_DLL int fmpz_mat_inv(fmpz_mat_t B, fmpz_t den, const fmpz_mat_t A); /* Modular reduction and reconstruction **************************************/ FLINT_DLL void fmpz_mat_set_nmod_mat(fmpz_mat_t A, const nmod_mat_t Amod); FLINT_DLL void fmpz_mat_set_nmod_mat_unsigned(fmpz_mat_t A, const nmod_mat_t Amod); FLINT_DLL void fmpz_mat_get_nmod_mat(nmod_mat_t Amod, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_CRT_ui(fmpz_mat_t res, const fmpz_mat_t mat1, const fmpz_t m1, const nmod_mat_t mat2, int sign); FLINT_DLL void fmpz_mat_multi_mod_ui_precomp(nmod_mat_t * residues, slong nres, const fmpz_mat_t mat, const fmpz_comb_t comb, fmpz_comb_temp_t temp); FLINT_DLL void fmpz_mat_multi_mod_ui(nmod_mat_t * residues, slong nres, const fmpz_mat_t mat); FLINT_DLL void fmpz_mat_multi_CRT_ui_precomp(fmpz_mat_t mat, nmod_mat_t * const residues, slong nres, const fmpz_comb_t comb, fmpz_comb_temp_t temp, int sign); FLINT_DLL void fmpz_mat_multi_CRT_ui(fmpz_mat_t mat, nmod_mat_t * const residues, slong nres, int sign); /* HNF and SNF **************************************************************/ FLINT_DLL void fmpz_mat_hnf(fmpz_mat_t H, const fmpz_mat_t A); 
FLINT_DLL void fmpz_mat_hnf_transform(fmpz_mat_t H, fmpz_mat_t U, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_hnf_classical(fmpz_mat_t H, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_hnf_xgcd(fmpz_mat_t H, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_hnf_minors(fmpz_mat_t H, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_hnf_minors_transform(fmpz_mat_t H, fmpz_mat_t U, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_hnf_modular(fmpz_mat_t H, const fmpz_mat_t A, const fmpz_t D); FLINT_DLL void fmpz_mat_hnf_modular_eldiv(fmpz_mat_t A, const fmpz_t D); FLINT_DLL void fmpz_mat_hnf_pernet_stein(fmpz_mat_t H, const fmpz_mat_t A, flint_rand_t state); FLINT_DLL int fmpz_mat_is_in_hnf(const fmpz_mat_t A); FLINT_DLL void fmpz_mat_snf(fmpz_mat_t S, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_snf_diagonal(fmpz_mat_t S, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_snf_kannan_bachem(fmpz_mat_t S, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_snf_iliopoulos(fmpz_mat_t S, const fmpz_mat_t A, const fmpz_t mod); FLINT_DLL int fmpz_mat_is_in_snf(const fmpz_mat_t A); /* Special matrices **********************************************************/ FLINT_DLL int fmpz_mat_is_hadamard(const fmpz_mat_t A); FLINT_DLL int fmpz_mat_hadamard(fmpz_mat_t A); /* Gram matrix **************************************************************/ FLINT_DLL void fmpz_mat_gram(fmpz_mat_t B, const fmpz_mat_t A); /* Conversions **************************************************************/ FLINT_DLL int fmpz_mat_get_d_mat(d_mat_t B, const fmpz_mat_t A); FLINT_DLL int fmpz_mat_get_d_mat_transpose(d_mat_t B, const fmpz_mat_t A); FLINT_DLL void fmpz_mat_get_mpf_mat(mpf_mat_t B, const fmpz_mat_t A); /* Cholesky Decomposition ****************************************************/ FLINT_DLL void fmpz_mat_chol_d(d_mat_t R, const fmpz_mat_t A); /* LLL ***********************************************************************/ FLINT_DLL int fmpz_mat_is_reduced(const fmpz_mat_t A, double delta, double eta); FLINT_DLL int 
fmpz_mat_is_reduced_gram(const fmpz_mat_t A, double delta, double eta); FLINT_DLL int fmpz_mat_is_reduced_with_removal(const fmpz_mat_t A, double delta, double eta, const fmpz_t gs_B, int newd); FLINT_DLL int fmpz_mat_is_reduced_gram_with_removal(const fmpz_mat_t A, double delta, double eta, const fmpz_t gs_B, int newd); /* Classical LLL *************************************************************/ FLINT_DLL void fmpz_mat_lll_original(fmpz_mat_t A, const fmpq_t delta, const fmpq_t eta); /* Modified LLL **************************************************************/ FLINT_DLL void fmpz_mat_lll_storjohann(fmpz_mat_t A, const fmpq_t delta, const fmpq_t eta); /* Column partitioning *******************************************************/ FLINT_DLL int fmpz_mat_col_partition(slong * part, fmpz_mat_t M, int short_circuit); /* Van Hoeij helper function *************************************************/ FLINT_DLL int fmpz_mat_next_col_van_hoeij(fmpz_mat_t M, fmpz_t P, fmpz_mat_t col, slong exp, slong U_exp); #ifdef __cplusplus } #endif #endif
/* Copyright (C) 2010 William Hart Copyright (C) 2010 Fredrik Johansson This file is part of FLINT. FLINT is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License (LGPL) as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. See <https://www.gnu.org/licenses/>. */
fitness_storage.ml
(* Thin accessors over the fitness value carried by the raw context. *)

(* [current ctxt] is the fitness currently recorded in [ctxt]. *)
let current = Raw_context.current_fitness

(* [increase ctxt] returns a context whose recorded fitness is one more
   than the current one. *)
let increase ctxt =
  Raw_context.set_current_fitness ctxt (Int64.succ (current ctxt))
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. <contact@tezos.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
context.mli
(** View over the context store, restricted to types, access and
    functional manipulation of an existing context. *)

type t

(** Keys in (key x value) database implementations *)
type key = string list

(** Values in (key x value) database implementations *)
type value = MBytes.t

val mem : t -> key -> bool Lwt.t
val dir_mem : t -> key -> bool Lwt.t
val get : t -> key -> value option Lwt.t
val set : t -> key -> value -> t Lwt.t

(** [copy] returns None if the [from] key is not bound *)
val copy : t -> from:key -> to_:key -> t option Lwt.t

val del : t -> key -> t Lwt.t
val remove_rec : t -> key -> t Lwt.t

val fold :
  t -> key -> init:'a ->
  f:([`Key of key | `Dir of key] -> 'a -> 'a Lwt.t) ->
  'a Lwt.t
val keys : t -> key -> key list Lwt.t
val fold_keys :
  t -> key -> init:'a -> f:(key -> 'a -> 'a Lwt.t) -> 'a Lwt.t

val register_resolver :
  'a Base58.encoding -> (t -> string -> 'a list Lwt.t) -> unit

val complete : t -> string -> string list Lwt.t
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. <contact@tezos.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
Mini_rules_filter.ml
module Flag = Flag_semgrep
module R = Mini_rule

(*****************************************************************************)
(* Prelude *)
(*****************************************************************************)
(* Filter mini-rules by running cheap regexp checks over the target file.
 *
 * Deprecated: the regexp-extraction-from-pattern optimization is better
 * performed at the rule level in Semgrep.ml rather than here on
 * mini-rules in Semgrep_generic.ml.
 *)

let logger = Logging.get_logger [ __MODULE__ ]

(*****************************************************************************)
(* Entry point *)
(*****************************************************************************)

let filter_mini_rules_relevant_to_file_using_regexp rules lang file =
  let contents = Common.read_file file in
  let rule_is_relevant rule =
    let required_strs =
      Analyze_pattern.extract_specific_strings ~lang rule.R.pattern
    in
    (* A rule is kept only when every extracted string occurs in the file.
     * We could save work by or-ing the regexps first and and-ing only when
     * the or succeeds, but that optimization is probably not worth it.
     *
     * Caveat: requiring ALL strings may become incorrect if the pattern
     * contains a DisjExpr; extract_specific_strings would then need to
     * build a proper disjunctive regexp itself.
     *)
    let keep =
      required_strs
      |> List.for_all (fun s ->
             let regexp = Regexp_engine.matching_exact_string s in
             Regexp_engine.unanchored_match regexp contents)
    in
    if not keep then logger#info "filtering rule %s" rule.R.id;
    keep
  in
  List.filter rule_is_relevant rules
  [@@profiling "Mini_rules_filter.filter"]
(* Yoann Padioleau * * Copyright (C) 2020 r2c * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * version 2.1 as published by the Free Software Foundation, with the * special exception on linking described in file LICENSE. * * This library is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the file * LICENSE for more details. *)
raw_context.ml
(* Raw protocol context: the underlying key/value store plus in-memory
   bookkeeping (level, gas, fees, deposits, nonces, ...). *)

module Int_set = Set.Make (Compare.Int)

(* The full in-memory context record.  [context] is the persistent store;
   the remaining fields are per-block / per-operation state. *)
type t =
  { context: Context.t ;
    constants: Constants_repr.parametric ;
    first_level: Raw_level_repr.t ;
    level: Level_repr.t ;
    timestamp: Time.t ;
    fitness: Int64.t ;
    deposits: Tez_repr.t Signature.Public_key_hash.Map.t ;
    allowed_endorsements:
      (Signature.Public_key.t * int list * bool) Signature.Public_key_hash.Map.t ;
    fees: Tez_repr.t ;
    rewards: Tez_repr.t ;
    block_gas: Z.t ;
    operation_gas: Gas_limit_repr.t ;
    storage_space_to_pay: Z.t option ;
    allocated_contracts: int option ;
    origination_nonce: Contract_repr.origination_nonce option ;
    internal_nonce: int ;
    internal_nonces_used: Int_set.t ;
  }

type context = t
type root_context = t

(* Simple field accessors. *)
let current_level ctxt = ctxt.level
let current_timestamp ctxt = ctxt.timestamp
let current_fitness ctxt = ctxt.fitness
let first_level ctxt = ctxt.first_level
let constants ctxt = ctxt.constants
let recover ctxt = ctxt.context

(* Mark delegate [k]'s endorsement right as used.  Asserts that the
   delegate is allowed and that the right was not already consumed. *)
let record_endorsement ctxt k =
  match Signature.Public_key_hash.Map.find_opt k ctxt.allowed_endorsements with
  | None -> assert false
  | Some (_, _, true) -> assert false (* right already used *)
  | Some (d, s, false) ->
      { ctxt with
        allowed_endorsements =
          Signature.Public_key_hash.Map.add k (d,s,true)
            ctxt.allowed_endorsements }

(* Install the endorsement-rights table.  Must be non-empty and must not
   already be initialized. *)
let init_endorsements ctxt allowed_endorsements =
  if Signature.Public_key_hash.Map.is_empty allowed_endorsements then
    assert false (* can't initialize to empty *)
  else begin
    if Signature.Public_key_hash.Map.is_empty ctxt.allowed_endorsements then
      { ctxt with allowed_endorsements }
    else
      assert false (* can't initialize twice *)
  end

let allowed_endorsements ctxt =
  ctxt.allowed_endorsements

type error += Too_many_internal_operations (* `Permanent *)

let () =
  let open Data_encoding in
  register_error_kind
    `Permanent
    ~id:"too_many_internal_operations"
    ~title: "Too many internal operations"
    ~description:
      "A transaction exceeded the hard limit \
       of internal operations it can emit"
    empty
    (function Too_many_internal_operations -> Some () | _ -> None)
    (fun () -> Too_many_internal_operations)

(* Allocate a fresh internal-operation nonce; fails once the hard cap of
   65_535 internal operations per transaction is reached. *)
let fresh_internal_nonce ctxt =
  if Compare.Int.(ctxt.internal_nonce >= 65_535) then
    error Too_many_internal_operations
  else
    ok ({ ctxt with internal_nonce = ctxt.internal_nonce + 1 },
        ctxt.internal_nonce)
let reset_internal_nonce ctxt =
  { ctxt with internal_nonces_used = Int_set.empty ; internal_nonce = 0 }
let record_internal_nonce ctxt k =
  { ctxt with internal_nonces_used = Int_set.add k ctxt.internal_nonces_used }
let internal_nonce_already_recorded ctxt k =
  Int_set.mem k ctxt.internal_nonces_used

let set_current_fitness ctxt fitness = { ctxt with fitness }

(* Accumulate fees/rewards; [+?] may fail on tez overflow. *)
let add_fees ctxt fees =
  Lwt.return Tez_repr.(ctxt.fees +? fees) >>=? fun fees ->
  return { ctxt with fees}

let add_rewards ctxt rewards =
  Lwt.return Tez_repr.(ctxt.rewards +? rewards) >>=? fun rewards ->
  return { ctxt with rewards}

(* Add [deposit] to the running deposit total of [delegate]. *)
let add_deposit ctxt delegate deposit =
  let previous =
    match Signature.Public_key_hash.Map.find_opt delegate ctxt.deposits with
    | Some tz -> tz
    | None -> Tez_repr.zero in
  Lwt.return Tez_repr.(previous +? deposit) >>=? fun deposit ->
  let deposits =
    Signature.Public_key_hash.Map.add delegate deposit ctxt.deposits in
  return { ctxt with deposits }

let get_deposits ctxt = ctxt.deposits
let get_rewards ctxt = ctxt.rewards
let get_fees ctxt = ctxt.fees

type error += Undefined_operation_nonce (* `Permanent *)

let () =
  let open Data_encoding in
  register_error_kind
    `Permanent
    ~id:"undefined_operation_nonce"
    ~title: "Ill timed access to the origination nonce"
    ~description:
      (* NOTE(review): "attemped" is a typo for "attempted", but this is a
         protocol-visible string; changing it would change behavior. *)
      "An origination was attemped out of the scope of a manager operation"
    empty
    (function Undefined_operation_nonce -> Some () | _ -> None)
    (fun () -> Undefined_operation_nonce)

(* Origination-nonce lifecycle: initialized from the operation hash,
   incremented per origination, cleared at the end of the operation. *)
let init_origination_nonce ctxt operation_hash =
  let origination_nonce =
    Some (Contract_repr.initial_origination_nonce operation_hash) in
  { ctxt with origination_nonce }

let origination_nonce ctxt =
  match ctxt.origination_nonce with
  | None -> error Undefined_operation_nonce
  | Some origination_nonce -> ok origination_nonce

let increment_origination_nonce ctxt =
  match ctxt.origination_nonce with
  | None -> error Undefined_operation_nonce
  | Some cur_origination_nonce ->
      let origination_nonce =
        Some (Contract_repr.incr_origination_nonce cur_origination_nonce) in
      ok ({ ctxt with origination_nonce }, cur_origination_nonce)

let unset_origination_nonce ctxt =
  { ctxt with origination_nonce = None }

type error += Gas_limit_too_high (* `Permanent *)

let () =
  let open Data_encoding in
  register_error_kind
    `Permanent
    ~id:"gas_limit_too_high"
    ~title: "Gas limit out of protocol hard bounds"
    ~description:
      "A transaction tried to exceed the hard limit on gas"
    empty
    (function Gas_limit_too_high -> Some () | _ -> None)
    (fun () -> Gas_limit_too_high)

(* Reject gas limits above the per-operation hard bound or below zero. *)
let check_gas_limit ctxt remaining =
  if Compare.Z.(remaining > ctxt.constants.hard_gas_limit_per_operation)
  || Compare.Z.(remaining < Z.zero) then
    error Gas_limit_too_high
  else
    ok ()

let set_gas_limit ctxt remaining =
  { ctxt with operation_gas = Limited { remaining } }
let set_gas_unlimited ctxt =
  { ctxt with operation_gas = Unaccounted }
let consume_gas ctxt cost =
  Gas_limit_repr.consume ctxt.block_gas ctxt.operation_gas cost >>? fun (block_gas, operation_gas) ->
  ok { ctxt with block_gas ; operation_gas }
let check_enough_gas ctxt cost =
  Gas_limit_repr.check_enough ctxt.block_gas ctxt.operation_gas cost
let gas_level ctxt = ctxt.operation_gas
let block_gas_level ctxt = ctxt.block_gas
(* Gas spent between two context snapshots; zero when either side is
   not gas-limited. *)
let gas_consumed ~since ~until =
  match gas_level since, gas_level until with
  | Limited { remaining = before }, Limited { remaining = after } ->
      Z.sub before after
  | _, _ -> Z.zero

(* Storage-space accounting: initialized once per operation, updated as
   storage is consumed, then cleared returning the totals. *)
let init_storage_space_to_pay ctxt =
  match ctxt.storage_space_to_pay with
  | Some _ -> assert false
  | None ->
      { ctxt with
        storage_space_to_pay = Some Z.zero ;
        allocated_contracts = Some 0 }

let update_storage_space_to_pay ctxt n =
  match ctxt.storage_space_to_pay with
  | None -> assert false
  | Some storage_space_to_pay ->
      { ctxt with storage_space_to_pay = Some (Z.add n storage_space_to_pay) }

let update_allocated_contracts_count ctxt =
  match ctxt.allocated_contracts with
  | None -> assert false
  | Some allocated_contracts ->
      { ctxt with allocated_contracts = Some (succ allocated_contracts) }

let clear_storage_space_to_pay ctxt =
  match ctxt.storage_space_to_pay, ctxt.allocated_contracts with
  | None, _ | _, None -> assert false
  | Some storage_space_to_pay, Some allocated_contracts ->
      { ctxt with storage_space_to_pay = None ; allocated_contracts = None},
      storage_space_to_pay, allocated_contracts

(* Errors raised on inconsistent accesses to the underlying store. *)
type storage_error =
  | Incompatible_protocol_version of string
  | Missing_key of string list * [`Get | `Set | `Del | `Copy]
  | Existing_key of string list
  | Corrupted_data of string list

let storage_error_encoding =
  let open Data_encoding in
  union [
    case (Tag 0)
      ~title:"Incompatible_protocol_version"
      (obj1 (req "incompatible_protocol_version" string))
      (function Incompatible_protocol_version arg -> Some arg | _ -> None)
      (fun arg -> Incompatible_protocol_version arg) ;
    case (Tag 1)
      ~title:"Missing_key"
      (obj2
         (req "missing_key" (list string))
         (req "function" (string_enum [
              "get", `Get ;
              "set", `Set ;
              "del", `Del ;
              "copy", `Copy ])))
      (function Missing_key (key, f) -> Some (key, f) | _ -> None)
      (fun (key, f) -> Missing_key (key, f)) ;
    case (Tag 2)
      ~title:"Existing_key"
      (obj1 (req "existing_key" (list string)))
      (function Existing_key key -> Some key | _ -> None)
      (fun key -> Existing_key key) ;
    case (Tag 3)
      ~title:"Corrupted_data"
      (obj1 (req "corrupted_data" (list string)))
      (function Corrupted_data key -> Some key | _ -> None)
      (fun key -> Corrupted_data key) ;
  ]

let pp_storage_error ppf = function
  | Incompatible_protocol_version version ->
      Format.fprintf ppf
        "Found a context with an unexpected version '%s'."
        version
  | Missing_key (key, `Get) ->
      Format.fprintf ppf
        "Missing key '%s'."
        (String.concat "/" key)
  | Missing_key (key, `Set) ->
      Format.fprintf ppf
        "Cannot set undefined key '%s'."
        (String.concat "/" key)
  | Missing_key (key, `Del) ->
      Format.fprintf ppf
        "Cannot delete undefined key '%s'."
        (String.concat "/" key)
  | Missing_key (key, `Copy) ->
      Format.fprintf ppf
        "Cannot copy undefined key '%s'."
        (String.concat "/" key)
  | Existing_key key ->
      Format.fprintf ppf
        "Cannot initialize defined key '%s'."
        (String.concat "/" key)
  | Corrupted_data key ->
      Format.fprintf ppf
        "Failed to parse the data at '%s'."
        (String.concat "/" key)

type error += Storage_error of storage_error

let () =
  register_error_kind
    `Permanent
    ~id:"context.storage_error"
    ~title: "Storage error (fatal internal error)"
    ~description:
      "An error that should never happen unless something \
       has been deleted or corrupted in the database."
    ~pp:(fun ppf err ->
        Format.fprintf ppf "@[<v 2>Storage error:@ %a@]" pp_storage_error err)
    storage_error_encoding
    (function Storage_error err -> Some err | _ -> None)
    (fun err -> Storage_error err)

let storage_error err = fail (Storage_error err)

(* Initialization *********************************************************)

(* This key should always be populated for every version of the
   protocol.  Its absence means that the context is empty. *)
let version_key = ["version"]
let version_value = "athens_004"

let version = "v1"
let first_level_key = [ version ; "first_level" ]
let constants_key = [ version ; "constants" ]
let protocol_param_key = [ "protocol_parameters" ]

(* Read/write the first level of the chain under this protocol. *)
let get_first_level ctxt =
  Context.get ctxt first_level_key >>= function
  | None -> storage_error (Missing_key (first_level_key, `Get))
  | Some bytes ->
      match
        Data_encoding.Binary.of_bytes Raw_level_repr.encoding bytes
      with
      | None -> storage_error (Corrupted_data first_level_key)
      | Some level -> return level

let set_first_level ctxt level =
  let bytes =
    Data_encoding.Binary.to_bytes_exn Raw_level_repr.encoding level in
  Context.set ctxt first_level_key bytes >>= fun ctxt ->
  return ctxt

type error += Failed_to_parse_parameter of MBytes.t
type error += Failed_to_decode_parameter of Data_encoding.json * string

let () =
  register_error_kind
    `Temporary
    ~id:"context.failed_to_parse_parameter"
    ~title: "Failed to parse parameter"
    ~description:
      "The protocol parameters are not valid JSON."
    ~pp:begin fun ppf bytes ->
      Format.fprintf ppf
        "@[<v 2>Cannot parse the protocol parameter:@ %s@]"
        (MBytes.to_string bytes)
    end
    Data_encoding.(obj1 (req "contents" bytes))
    (function Failed_to_parse_parameter data -> Some data | _ -> None)
    (fun data -> Failed_to_parse_parameter data) ;
  register_error_kind
    `Temporary
    ~id:"context.failed_to_decode_parameter"
    ~title: "Failed to decode parameter"
    ~description:
      "Unexpected JSON object."
    ~pp:begin fun ppf (json, msg) ->
      Format.fprintf ppf
        "@[<v 2>Cannot decode the protocol parameter:@ %s@ %a@]"
        msg
        Data_encoding.Json.pp json
    end
    Data_encoding.(obj2
                     (req "contents" json)
                     (req "error" string))
    (function
      | Failed_to_decode_parameter (json, msg) -> Some (json, msg)
      | _ -> None)
    (fun (json, msg) -> Failed_to_decode_parameter (json, msg))

(* Read and consume (delete) the protocol parameters stored at genesis. *)
let get_proto_param ctxt =
  Context.get ctxt protocol_param_key >>= function
  | None ->
      failwith "Missing protocol parameters."
  | Some bytes ->
      match Data_encoding.Binary.of_bytes Data_encoding.json bytes with
      | None -> fail (Failed_to_parse_parameter bytes)
      | Some json -> begin
          Context.del ctxt protocol_param_key >>= fun ctxt ->
          match Data_encoding.Json.destruct Parameters_repr.encoding json with
          | exception (Data_encoding.Json.Cannot_destruct _ as exn) ->
              Format.kasprintf
                failwith "Invalid protocol_parameters: %a %a"
                (fun ppf -> Data_encoding.Json.print_error ppf) exn
                Data_encoding.Json.pp json
          | param -> return (param, ctxt)
        end

let set_constants ctxt constants =
  let bytes =
    Data_encoding.Binary.to_bytes_exn
      Parameters_repr.constants_encoding constants in
  Context.set ctxt constants_key bytes

let get_constants ctxt =
  Context.get ctxt constants_key >>= function
  | None ->
      failwith "Internal error: cannot read constants in context."
  | Some bytes ->
      match
        Data_encoding.Binary.of_bytes
          Parameters_repr.constants_encoding bytes
      with
      | None ->
          failwith "Internal error: cannot parse constants in context."
      | Some constants -> return constants

(* Only for stitching from proto_003 *)
let get_003_constants ctxt =
  Context.get ctxt constants_key >>= function
  | None ->
      failwith "Internal error: cannot read constants in context."
  | Some bytes ->
      match
        Data_encoding.Binary.of_bytes
          Parameters_repr.Proto_003.constants_encoding bytes
      with
      | None ->
          failwith "Internal error: cannot parse constants in context."
      | Some constants -> return constants

let patch_constants ctxt f =
  let constants = f ctxt.constants in
  set_constants ctxt.context constants >>= fun context ->
  Lwt.return { ctxt with context ; constants }

(* Check that the stored version matches this protocol's version. *)
let check_inited ctxt =
  Context.get ctxt version_key >>= function
  | None ->
      failwith "Internal error: un-initialized context."
  | Some bytes ->
      let s = MBytes.to_string bytes in
      if Compare.String.(s = version_value) then
        return_unit
      else
        storage_error (Incompatible_protocol_version s)

(* Build the full in-memory context for a (non-first) block. *)
let prepare ~level ~timestamp ~fitness ctxt =
  Lwt.return (Raw_level_repr.of_int32 level) >>=? fun level ->
  Lwt.return (Fitness_repr.to_int64 fitness) >>=? fun fitness ->
  check_inited ctxt >>=? fun () ->
  get_constants ctxt >>=? fun constants ->
  get_first_level ctxt >>=? fun first_level ->
  let level =
    Level_repr.from_raw
      ~first_level
      ~blocks_per_cycle:constants.Constants_repr.blocks_per_cycle
      ~blocks_per_voting_period:constants.Constants_repr.blocks_per_voting_period
      ~blocks_per_commitment:constants.Constants_repr.blocks_per_commitment
      level in
  return {
    context = ctxt ;
    constants ;
    level ;
    timestamp ;
    fitness ;
    first_level ;
    allowed_endorsements = Signature.Public_key_hash.Map.empty ;
    fees = Tez_repr.zero ;
    rewards = Tez_repr.zero ;
    deposits = Signature.Public_key_hash.Map.empty ;
    operation_gas = Unaccounted ;
    storage_space_to_pay = None ;
    allocated_contracts = None ;
    block_gas = constants.Constants_repr.hard_gas_limit_per_block ;
    origination_nonce = None ;
    internal_nonce = 0 ;
    internal_nonces_used = Int_set.empty ;
  }

type previous_protocol =
  | Genesis of Parameters_repr.t
  | Alpha_003

(* Detect which protocol wrote the context and stamp it with ours. *)
let check_and_update_protocol_version ctxt =
  begin
    Context.get ctxt version_key >>= function
    | None ->
        failwith "Internal error: un-initialized context in check_first_block."
    | Some bytes ->
        let s = MBytes.to_string bytes in
        if Compare.String.(s = version_value) then
          failwith "Internal error: previously initialized context." ;
        if Compare.String.(s = "genesis") then
          get_proto_param ctxt >>=? fun (param, ctxt) ->
          return (Genesis param, ctxt)
        else if Compare.String.(s = "alpha_003") then
          return (Alpha_003, ctxt)
        else
          storage_error (Incompatible_protocol_version s)
  end >>=? fun (previous_proto, ctxt) ->
  Context.set ctxt version_key
    (MBytes.of_string version_value) >>= fun ctxt ->
  return (previous_proto, ctxt)

(* Prepare the very first block under this protocol, migrating stored
   data from the previous protocol when needed. *)
let prepare_first_block ~level ~timestamp ~fitness ctxt =
  check_and_update_protocol_version ctxt >>=? fun (previous_proto, ctxt) ->
  begin
    match previous_proto with
    | Genesis param ->
        Lwt.return (Raw_level_repr.of_int32 level) >>=? fun first_level ->
        set_first_level ctxt first_level >>=? fun ctxt ->
        set_constants ctxt param.constants >>= fun ctxt ->
        return ctxt
    | Alpha_003 ->
        (* Migration: Since we changed `Constants_repr.parametric_encoding`,
           we need to upgrade the contents of `constants_key` in the
           context. *)
        get_003_constants ctxt >>=? fun (old : Parameters_repr.Proto_003.parametric) ->
        set_constants ctxt {
          preserved_cycles = old.preserved_cycles ;
          blocks_per_cycle = old.blocks_per_cycle ;
          blocks_per_commitment = old.blocks_per_commitment ;
          blocks_per_roll_snapshot = old.blocks_per_roll_snapshot ;
          blocks_per_voting_period = old.blocks_per_voting_period ;
          time_between_blocks = old.time_between_blocks ;
          endorsers_per_block = old.endorsers_per_block ;
          hard_gas_limit_per_operation = old.hard_gas_limit_per_operation ;
          hard_gas_limit_per_block = old.hard_gas_limit_per_block ;
          proof_of_work_threshold = old.proof_of_work_threshold ;
          tokens_per_roll = old.tokens_per_roll ;
          michelson_maximum_type_size = old.michelson_maximum_type_size ;
          seed_nonce_revelation_tip = old.seed_nonce_revelation_tip ;
          origination_size = old.origination_size ;
          block_security_deposit = old.block_security_deposit ;
          endorsement_security_deposit = old.endorsement_security_deposit ;
          block_reward = old.block_reward ;
          endorsement_reward = old.endorsement_reward ;
          cost_per_byte = old.cost_per_byte ;
          hard_storage_limit_per_operation = old.hard_storage_limit_per_operation ;
          test_chain_duration = Constants_repr.default.test_chain_duration ;
        } >>= fun ctxt ->
        return ctxt
  end >>=? fun ctxt ->
  prepare ctxt ~level ~timestamp ~fitness >>=? fun ctxt ->
  return (previous_proto, ctxt)

let activate ({ context = c ; _ } as s) h =
  Updater.activate c h >>= fun c -> Lwt.return { s with context = c }
let fork_test_chain ({ context = c ; _ } as s) protocol expiration =
  Updater.fork_test_chain c ~protocol ~expiration >>= fun c ->
  Lwt.return { s with context = c }

(* Wrap a Base58 resolver so it can run against a bare Context.t by
   faking the in-memory fields with default values. *)
let register_resolvers enc resolve =
  let resolve context str =
    let faked_context = {
      context ;
      constants = Constants_repr.default ;
      first_level = Raw_level_repr.root ;
      level = Level_repr.root Raw_level_repr.root ;
      timestamp = Time.of_seconds 0L ;
      fitness = 0L ;
      allowed_endorsements = Signature.Public_key_hash.Map.empty ;
      storage_space_to_pay = None ;
      allocated_contracts = None ;
      fees = Tez_repr.zero ;
      rewards = Tez_repr.zero ;
      deposits = Signature.Public_key_hash.Map.empty ;
      block_gas = Constants_repr.default.hard_gas_limit_per_block ;
      operation_gas = Unaccounted ;
      origination_nonce = None ;
      internal_nonce = 0 ;
      internal_nonces_used = Int_set.empty ;
    } in
    resolve faked_context str in
  Context.register_resolver enc resolve

(* Generic context ********************************************************)

type key = string list
type value = MBytes.t

(* Signature shared by all storage accessors built over this module. *)
module type T = sig
  type t
  type context = t
  val mem: context -> key -> bool Lwt.t
  val dir_mem: context -> key -> bool Lwt.t
  val get: context -> key -> value tzresult Lwt.t
  val get_option: context -> key -> value option Lwt.t
  val init: context -> key -> value -> context tzresult Lwt.t
  val set: context -> key -> value -> context tzresult Lwt.t
  val init_set: context -> key -> value -> context Lwt.t
  val set_option: context -> key -> value option -> context Lwt.t
  val delete: context -> key -> context tzresult Lwt.t
  val remove: context -> key -> context Lwt.t
  val remove_rec: context -> key -> context Lwt.t
  val copy: context -> from:key -> to_:key -> context tzresult Lwt.t
  val fold:
    context -> key -> init:'a ->
    f:([ `Key of key | `Dir of key ] -> 'a -> 'a Lwt.t) ->
    'a Lwt.t
  val keys: context -> key -> key list Lwt.t
  val fold_keys:
    context -> key -> init:'a -> f:(key -> 'a -> 'a Lwt.t) -> 'a Lwt.t
  val project: context -> root_context
  val absolute_key: context -> key -> key
  val consume_gas: context -> Gas_limit_repr.cost -> context tzresult
  val check_enough_gas: context -> Gas_limit_repr.cost -> unit tzresult
  val description: context Storage_description.t
end

let mem ctxt k = Context.mem ctxt.context k
let dir_mem ctxt k = Context.dir_mem ctxt.context k

let get ctxt k =
  Context.get ctxt.context k >>= function
  | None -> storage_error (Missing_key (k, `Get))
  | Some v -> return v

let get_option ctxt k =
  Context.get ctxt.context k

(* Verify that the k is present before modifying *)
let set ctxt k v =
  Context.mem ctxt.context k >>= function
  | false -> storage_error (Missing_key (k, `Set))
  | true ->
      Context.set ctxt.context k v >>= fun context ->
      return { ctxt with context }

(* Verify that the k is not present before inserting *)
let init ctxt k v =
  Context.mem ctxt.context k >>= function
  | true -> storage_error (Existing_key k)
  | false ->
      Context.set ctxt.context k v >>= fun context ->
      return { ctxt with context }

(* Does not verify that the key is present or not *)
let init_set ctxt k v =
  Context.set ctxt.context k v >>= fun context ->
  Lwt.return { ctxt with context }

(* Verify that the key is present before deleting *)
let delete ctxt k =
  Context.mem ctxt.context k >>= function
  | false -> storage_error (Missing_key (k, `Del))
  | true ->
      Context.del ctxt.context k >>= fun context ->
      return { ctxt with context }

(* Do not verify before deleting *)
let remove ctxt k =
  Context.del ctxt.context k >>= fun context ->
  Lwt.return { ctxt with context }

let set_option ctxt k = function
  | None -> remove ctxt k
  | Some v -> init_set ctxt k v

let remove_rec ctxt k =
  Context.remove_rec ctxt.context k >>= fun context ->
  Lwt.return { ctxt with context }

let copy ctxt ~from ~to_ =
  Context.copy ctxt.context ~from ~to_ >>= function
  | None -> storage_error (Missing_key (from, `Copy))
  | Some context ->
      return { ctxt with context }

let fold ctxt k ~init ~f =
  Context.fold ctxt.context k ~init ~f

let keys ctxt k =
  Context.keys ctxt.context k

let fold_keys ctxt k ~init ~f =
  Context.fold_keys ctxt.context k ~init ~f

let project x = x

let absolute_key _ k = k

let description = Storage_description.create ()
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. <contact@tezos.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
client_proto_programs.ml
(* Client-side helpers to run, trace and typecheck Michelson scripts,
   and to pretty-print the corresponding results and errors. *)

open Protocol
open Alpha_context
open Tezos_micheline
open Michelson_v1_printer

(* Named-script alias table: scripts are stored as their source text and
   re-parsed on load. *)
module Program = Client_aliases.Alias (struct
    type t = Michelson_v1_parser.parsed Micheline_parser.parsing_result

    include Compare.Make (struct
        type nonrec t = t
        let compare = Micheline_parser.compare Michelson_v1_parser.compare_parsed
      end)

    let encoding =
      Data_encoding.conv
        (fun ({Michelson_v1_parser.source; _}, _) -> source)
        (fun source -> Michelson_v1_parser.parse_toplevel source)
        Data_encoding.string

    let of_source source = return (Michelson_v1_parser.parse_toplevel source)

    let to_source ({Michelson_v1_parser.source; _}, _) = return source

    let name = "script"
  end)

(* Report script errors through the client printer, then fail. *)
let print_errors (cctxt : #Client_context.printer) errs ~show_source ~parsed =
  cctxt#warning "%a"
    (Michelson_v1_error_reporter.report_errors
       ~details:false
       ~show_source
       ~parsed) errs >>= fun () ->
  cctxt#error "error running script" >>= fun () ->
  return_unit

(* Print a big-map diff, one "+"/"-" line per key. *)
let print_big_map_diff ppf = function
  | None -> ()
  | Some diff ->
      Format.fprintf ppf
        "@[<v 2>map diff:@,%a@]@,"
        (Format.pp_print_list
           ~pp_sep:Format.pp_print_space
           (fun ppf Contract.{diff_key; diff_value; _} ->
              Format.fprintf ppf "%s %a%a"
                (match diff_value with
                 | None -> "-"
                 | Some _ -> "+")
                print_expr diff_key
                (fun ppf -> function
                   | None -> ()
                   | Some x -> Format.fprintf ppf "-> %a" print_expr x)
                diff_value))
        diff

(* Display the outcome of running a script: storage, emitted internal
   operations and big-map diff, or the errors. *)
let print_run_result (cctxt : #Client_context.printer) ~show_source ~parsed = function
  | Ok (storage, operations, maybe_diff) ->
      cctxt#message "@[<v 0>@[<v 2>storage@,\
                     %a@]@,\
                     @[<v 2>emitted operations@,\
                     %a@]@,\
                     @[%a@]@]@."
        print_expr storage
        (Format.pp_print_list Operation_result.pp_internal_operation) operations
        print_big_map_diff maybe_diff >>= fun () ->
      return_unit
  | Error errs ->
      print_errors cctxt errs ~show_source ~parsed

(* Same as [print_run_result], plus the execution trace. *)
let print_trace_result (cctxt : #Client_context.printer) ~show_source ~parsed =
  function
  | Ok (storage, operations, trace, maybe_big_map_diff) ->
      cctxt#message
        "@[<v 0>@[<v 2>storage@,\
         %a@]@,\
         @[<v 2>emitted operations@,\
         %a@]@,\
         %a@[<v 2>@[<v 2>trace@,\
         %a@]@]@."
        print_expr storage
        (Format.pp_print_list Operation_result.pp_internal_operation) operations
        print_big_map_diff maybe_big_map_diff
        print_execution_trace trace >>= fun () ->
      return_unit
  | Error errs ->
      print_errors cctxt errs ~show_source ~parsed

(* Run a script through the node's RPC; defaults to 0.50 tez. *)
let run
    (cctxt : #Alpha_client_context.rpc_context)
    ~(chain : Chain_services.chain)
    ~block
    ?(amount = Tez.fifty_cents)
    ~(program : Michelson_v1_parser.parsed)
    ~(storage : Michelson_v1_parser.parsed)
    ~(input : Michelson_v1_parser.parsed)
    () =
  Alpha_services.Helpers.Scripts.run_code cctxt
    (chain, block)
    program.expanded (storage.expanded, input.expanded, amount)

(* Like [run], but also returns the execution trace. *)
let trace
    (cctxt : #Alpha_client_context.rpc_context)
    ~(chain : Chain_services.chain)
    ~block
    ?(amount = Tez.fifty_cents)
    ~(program : Michelson_v1_parser.parsed)
    ~(storage : Michelson_v1_parser.parsed)
    ~(input : Michelson_v1_parser.parsed)
    () =
  Alpha_services.Helpers.Scripts.trace_code cctxt
    (chain, block)
    program.expanded (storage.expanded, input.expanded, amount)

let typecheck_data
    cctxt
    ~(chain : Chain_services.chain)
    ~block
    ?gas
    ~(data : Michelson_v1_parser.parsed)
    ~(ty : Michelson_v1_parser.parsed)
    () =
  Alpha_services.Helpers.Scripts.typecheck_data cctxt
    (chain, block)
    (data.expanded, ty.expanded, gas)

let typecheck_program
    cctxt
    ~(chain : Chain_services.chain)
    ~block
    ?gas
    (program : Michelson_v1_parser.parsed) =
  Alpha_services.Helpers.Scripts.typecheck_code cctxt
    (chain, block) (program.expanded, gas)

(* Print typechecking results either as Emacs-readable s-expressions or
   as human-readable output (optionally with the inferred type map). *)
let print_typecheck_result
    ~emacs ~show_types ~print_source_on_error
    program res (cctxt : #Client_context.printer) =
  if emacs then
    let type_map, errs, _gas =
      match res with
      | Ok (type_map, gas) -> (type_map, [], Some gas)
      | Error (Environment.Ecoproto_error
                 (Script_tc_errors.Ill_typed_contract (_, type_map)) :: _
               as errs) ->
          (type_map, errs, None)
      | Error errs ->
          ([], errs, None) in
    cctxt#message
      "(@[<v 0>(types . %a)@ (errors . %a)@])"
      Michelson_v1_emacs.print_type_map (program, type_map)
      Michelson_v1_emacs.report_errors (program, errs) >>= fun () ->
    return_unit
  else
    match res with
    | Ok (type_map, gas) ->
        let program = Michelson_v1_printer.inject_types type_map program in
        cctxt#message "@[<v 0>Well typed@,Gas remaining: %a@]"
          Gas.pp gas >>= fun () ->
        if show_types then
          cctxt#message "%a" Micheline_printer.print_expr program >>= fun () ->
          return_unit
        else
          return_unit
    | Error errs ->
        cctxt#warning "%a"
          (Michelson_v1_error_reporter.report_errors
             ~details:show_types
             ~show_source:print_source_on_error
             ~parsed:program)
          errs >>= fun () ->
        cctxt#error "ill-typed script"
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. <contact@tezos.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
(* cmdliner_info.ml *)
(* Static metadata describing the pieces of a cmdliner command-line
   interface: exit codes, environment variables, arguments, commands and
   evaluation contexts.  These records are consumed by the parser and by
   the man-page generator. *)

(* Exit codes *)

module Exit = struct
  type code = int

  (* Conventional cmdliner exit codes. *)
  let ok = 0
  let some_error = 123
  let cli_error = 124
  let internal_error = 125

  type info =                     (* information about an exit code. *)
    { codes : code * code;        (* min, max *)
      doc : string;               (* help. *)
      docs : string; }            (* title of help section where listed. *)

  (* [info min ?max] documents the inclusive range [min]..[max]
     (defaulting to the single code [min]). *)
  let info
      ?(docs = Cmdliner_manpage.s_exit_status) ?(doc = "undocumented") ?max
      min
    =
    let max = match max with None -> min | Some max -> max in
    { codes = (min, max); doc; docs }

  let info_codes i = i.codes
  let info_code i = fst i.codes
  let info_doc i = i.doc
  let info_docs i = i.docs
  (* Orders exit code infos by their code range. *)
  let info_order i0 i1 = compare i0.codes i1.codes
  (* Default documented exit statuses for every command. *)
  let defaults =
    [ info ok ~doc:"on success.";
      info some_error
        ~doc:"on indiscriminate errors reported on standard error.";
      info cli_error ~doc:"on command line parsing errors.";
      info internal_error ~doc:"on unexpected internal errors (bugs)."; ]
end

(* Environment variables *)

module Env = struct
  type var = string
  type info =                (* information about an environment variable. *)
    { id : int;              (* unique id for the env var. *)
      deprecated : string option;
      var : string;          (* the variable. *)
      doc : string;          (* help. *)
      docs : string; }       (* title of help section where listed. *)

  let info
      ?deprecated
      ?(docs = Cmdliner_manpage.s_environment) ?(doc = "See option $(opt).")
      var
    =
    { id = Cmdliner_base.uid (); deprecated; var; doc; docs }

  let info_deprecated i = i.deprecated
  let info_var i = i.var
  let info_doc i = i.doc
  let info_docs i = i.docs
  (* Infos compare by creation id, i.e. definition order. *)
  let info_compare i0 i1 = Int.compare i0.id i1.id

  module Set = Set.Make
      (struct type t = info let compare = info_compare end)
end

(* Arguments *)

module Arg = struct
  (* Behaviour when the argument is absent: hard error, lazily computed
     default value, or a documentation-only description of the default. *)
  type absence = Err | Val of string Lazy.t | Doc of string
  type opt_kind = Flag | Opt | Opt_vopt of string

  type pos_kind =       (* information about a positional argument. *)
    { pos_rev : bool;   (* if [true] positions are counted from the end. *)
      pos_start : int;  (* start positional argument. *)
      pos_len : int option } (* number of arguments or [None] if unbounded. *)

  let pos ~rev:pos_rev ~start:pos_start ~len:pos_len =
    { pos_rev; pos_start; pos_len}

  let pos_rev p = p.pos_rev
  let pos_start p = p.pos_start
  let pos_len p = p.pos_len

  type t =                       (* information about a command line argument. *)
    { id : int;                  (* unique id for the argument. *)
      deprecated : string option;      (* deprecation message *)
      absent : absence;                (* behaviour if absent. *)
      env : Env.info option;           (* environment variable for default value. *)
      doc : string;                    (* help. *)
      docv : string;           (* variable name for the argument in help. *)
      docs : string;           (* title of help section where listed. *)
      pos : pos_kind;          (* positional arg kind. *)
      opt_kind : opt_kind;     (* optional arg kind. *)
      opt_names : string list; (* names (for opt args). *)
      opt_all : bool; }        (* repeatable (for opt args). *)

  (* Placeholder positional kind used until [make_pos*] fills it in. *)
  let dumb_pos = pos ~rev:false ~start:(-1) ~len:None

  (* Creates the base info; single-character names get a "-" prefix,
     longer ones "--".  [docs] defaults to the arguments section for
     positional args, the options section otherwise. *)
  let v ?deprecated ?(absent = "") ?docs ?(docv = "") ?(doc = "") ?env names =
    let dash n = if String.length n = 1 then "-" ^ n else "--" ^ n in
    let opt_names = List.map dash names in
    let docs = match docs with
    | Some s -> s
    | None ->
        match names with
        | [] -> Cmdliner_manpage.s_arguments
        | _ -> Cmdliner_manpage.s_options
    in
    { id = Cmdliner_base.uid (); deprecated; absent = Doc absent;
      env; doc; docv; docs; pos = dumb_pos; opt_kind = Flag; opt_names;
      opt_all = false; }

  let id a = a.id
  let deprecated a = a.deprecated
  let absent a = a.absent
  let env a = a.env
  let doc a = a.doc
  let docv a = a.docv
  let docs a = a.docs
  let pos_kind a = a.pos
  let opt_kind a = a.opt_kind
  let opt_names a = a.opt_names
  let opt_all a = a.opt_all
  let opt_name_sample a =
    (* First long or short name (in that order) in the list; this
       allows the client to control which name is shown *)
    let rec find = function
    | [] -> List.hd a.opt_names
    | n :: ns -> if (String.length n) > 2 then n else find ns
    in
    find a.opt_names

  (* Functional updates used while building argument terms. *)
  let make_req a = { a with absent = Err }
  let make_all_opts a = { a with opt_all = true }
  let make_opt ~absent ~kind:opt_kind a = { a with absent; opt_kind }
  let make_opt_all ~absent ~kind:opt_kind a =
    { a with absent; opt_kind; opt_all = true  }

  let make_pos ~pos a = { a with pos }
  let make_pos_abs ~absent ~pos a = { a with absent; pos }

  (* An argument is optional iff it carries at least one option name. *)
  let is_opt a = a.opt_names <> []
  let is_pos a = a.opt_names = []
  let is_req a = a.absent = Err

  let pos_cli_order a0 a1 =           (* best-effort order on the cli. *)
    let c = compare (a0.pos.pos_rev) (a1.pos.pos_rev) in
    if c <> 0 then c else
    if a0.pos.pos_rev
    then compare a1.pos.pos_start a0.pos.pos_start
    else compare a0.pos.pos_start a1.pos.pos_start

  let rev_pos_cli_order a0 a1 = pos_cli_order a1 a0

  (* Arguments compare by creation id, i.e. definition order. *)
  let compare a0 a1 = Int.compare a0.id a1.id

  module Set = Set.Make (struct type nonrec t = t let compare = compare end)
end

(* Commands *)

module Cmd = struct
  type t =                         (* information about a command. *)
    { name : string;               (* name of the cmd. *)
      version : string option;     (* version (for --version). *)
      deprecated : string option;  (* deprecation message *)
      doc : string;                (* one line description of cmd. *)
      docs : string;          (* title of man section where listed (commands). *)
      sdocs : string;     (* standard options, title of section where listed. *)
      exits : Exit.info list;      (* exit codes for the cmd. *)
      envs : Env.info list;        (* env vars that influence the cmd. *)
      man : Cmdliner_manpage.block list;      (* man page text. *)
      man_xrefs : Cmdliner_manpage.xref list; (* man cross-refs. *)
      args : Arg.Set.t;            (* Command arguments. *)
      has_args : bool;             (* [true] if has own parsing term. *)
      children : t list; }         (* Children, if any. *)

  let v
      ?deprecated ?(man_xrefs = [`Main]) ?(man = []) ?(envs = [])
      ?(exits = Exit.defaults) ?(sdocs = Cmdliner_manpage.s_common_options)
      ?(docs = Cmdliner_manpage.s_commands) ?(doc = "") ?version name
    =
    { name; version; deprecated; doc; docs; sdocs; exits;
      envs; man; man_xrefs; args = Arg.Set.empty;
      has_args = true; children = [] }

  let name t = t.name
  let version t = t.version
  let deprecated t = t.deprecated
  let doc t = t.doc
  let docs t = t.docs
  let stdopts_docs t = t.sdocs
  let exits t = t.exits
  let envs t = t.envs
  let man t = t.man
  let man_xrefs t = t.man_xrefs
  let args t = t.args
  let has_args t = t.has_args
  let children t = t.children
  (* Merge extra argument infos into the command's set. *)
  let add_args t args = { t with args = Arg.Set.union args t.args }
  (* Attach subcommands; [args = None] marks a pure group with no
     parsing term of its own. *)
  let with_children cmd ~args ~children =
    let has_args, args = match args with
    | None -> false, cmd.args
    | Some args -> true, Arg.Set.union args cmd.args
    in
    { cmd with has_args; args; children }
end

(* Evaluation *)

module Eval = struct
  type t =                     (* information about the evaluation context. *)
    { cmd : Cmd.t;                     (* cmd being evaluated. *)
      parents : Cmd.t list;            (* parents of cmd, root is last. *)
      env : string -> string option;   (* environment variable lookup. *)
      err_ppf : Format.formatter       (* error formatter *) }

  let v ~cmd ~parents ~env ~err_ppf = { cmd; parents; env; err_ppf }

  let cmd e = e.cmd
  let parents e = e.parents
  let env_var e v = e.env v
  let err_ppf e = e.err_ppf
  (* Root command: last parent, or the command itself at the root. *)
  let main e = match List.rev e.parents with [] -> e.cmd | m :: _ -> m
  let with_cmd ei cmd = { ei with cmd }
end

(*---------------------------------------------------------------------------
   Copyright (c) 2011 The cmdliner programmers

   Permission to use, copy, modify, and/or distribute this software for any
   purpose with or without fee is hereby granted, provided that the above
   copyright notice and this permission notice appear in all copies.

   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
   ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  ---------------------------------------------------------------------------*)
(*--------------------------------------------------------------------------- Copyright (c) 2011 The cmdliner programmers. All rights reserved. Distributed under the ISC license, see terms at the end of the file. ---------------------------------------------------------------------------*)
(* roll_storage.ml *)
(* Roll storage: maintains the distribution of rolls (fixed-size stakes
   of [tokens_per_roll] tez) among delegates, the per-cycle snapshots of
   that distribution, and the random selection of baking/endorsing
   rights from a frozen snapshot.
   NOTE(review): exact ordering of storage reads/writes below is
   consensus-critical; comments only, no code changes. *)

open Misc

type error +=
  | Consume_roll_change (* `Permanent *)
  | No_roll_for_delegate (* `Permanent *)
  | No_roll_snapshot_for_cycle of Cycle_repr.t (* `Permanent *)
  | Unregistered_delegate of Signature.Public_key_hash.t (* `Permanent *)

(* Register the error cases above with the protocol error monad. *)
let () =
  let open Data_encoding in
  (* Consume roll change *)
  register_error_kind
    `Permanent
    ~id:"contract.manager.consume_roll_change"
    ~title:"Consume roll change"
    ~description:"Change is not enough to consume a roll."
    ~pp:(fun ppf () ->
        Format.fprintf ppf "Not enough change to consume a roll.")
    empty
    (function Consume_roll_change -> Some () | _ -> None)
    (fun () -> Consume_roll_change) ;
  (* No roll for delegate *)
  register_error_kind
    `Permanent
    ~id:"contract.manager.no_roll_for_delegate"
    ~title:"No roll for delegate"
    ~description:"Delegate has no roll."
    ~pp:(fun ppf () -> Format.fprintf ppf "Delegate has no roll.")
    empty
    (function No_roll_for_delegate -> Some () | _ -> None)
    (fun () -> No_roll_for_delegate) ;
  (* No roll snapshot for cycle *)
  register_error_kind
    `Permanent
    ~id:"contract.manager.no_roll_snapshot_for_cycle"
    ~title:"No roll snapshot for cycle"
    ~description:"A snapshot of the rolls distribution does not exist for this cycle."
    ~pp:(fun ppf c ->
        Format.fprintf ppf
          "A snapshot of the rolls distribution does not exist for cycle %a"
          Cycle_repr.pp c)
    (obj1 (req "cycle" Cycle_repr.encoding))
    (function No_roll_snapshot_for_cycle c-> Some c | _ -> None)
    (fun c -> No_roll_snapshot_for_cycle c) ;
  (* Unregistered delegate *)
  register_error_kind
    `Permanent
    ~id:"contract.manager.unregistered_delegate"
    ~title:"Unregistered delegate"
    ~description:"A contract cannot be delegated to an unregistered delegate"
    ~pp:(fun ppf k->
        Format.fprintf ppf
          "The provided public key (with hash %a) is \
           \ not registered as valid delegate key."
          Signature.Public_key_hash.pp k)
    (obj1 (req "hash" Signature.Public_key_hash.encoding))
    (function Unregistered_delegate k -> Some k | _ -> None)
    (fun k -> Unregistered_delegate k)

let get_contract_delegate c contract =
  Storage.Contract.Delegate.get_option c contract

(* Resolve a delegate's public key; fails with [Unregistered_delegate]
   if only the hash (or nothing) is stored. *)
let delegate_pubkey ctxt delegate =
  Storage.Contract.Manager.get_option ctxt
    (Contract_repr.implicit_contract delegate) >>=? function
  | None | Some (Manager_repr.Hash _) ->
      fail (Unregistered_delegate delegate)
  | Some (Manager_repr.Public_key pk) ->
      return pk

(* Erase every trace of [cycle]'s roll snapshot from storage. *)
let clear_cycle c cycle =
  Storage.Roll.Snapshot_for_cycle.get c cycle >>=? fun index ->
  Storage.Roll.Snapshot_for_cycle.delete c cycle >>=? fun c ->
  Storage.Roll.Last_for_snapshot.delete (c, cycle) index >>=? fun c ->
  Storage.Roll.Owner.delete_snapshot c (cycle, index) >>= fun c ->
  return c

(* Fold [f] over every currently-owned roll, skipping unowned ones. *)
let fold ctxt ~f init =
  Storage.Roll.Next.get ctxt >>=? fun last ->
  let rec loop ctxt roll acc =
    acc >>=? fun acc ->
    if Roll_repr.(roll = last) then
      return acc
    else
      Storage.Roll.Owner.get_option ctxt roll >>=? function
      | None ->
          loop ctxt (Roll_repr.succ roll) (return acc)
      | Some delegate ->
          loop ctxt (Roll_repr.succ roll) (f roll delegate acc) in
  loop ctxt Roll_repr.first (return init)

(* Record a new snapshot of the current roll ownership for [cycle] and
   bump the snapshot counter. *)
let snapshot_rolls_for_cycle ctxt cycle =
  Storage.Roll.Snapshot_for_cycle.get ctxt cycle >>=? fun index ->
  Storage.Roll.Snapshot_for_cycle.set ctxt cycle (index + 1) >>=? fun ctxt ->
  Storage.Roll.Owner.snapshot ctxt (cycle, index) >>=? fun ctxt ->
  Storage.Roll.Next.get ctxt >>=? fun last ->
  Storage.Roll.Last_for_snapshot.init (ctxt, cycle) index last >>=? fun ctxt ->
  return ctxt

(* Pick one snapshot of [cycle] at random (seeded by the cycle's seed),
   keep it, and delete all the others. *)
let freeze_rolls_for_cycle ctxt cycle =
  Storage.Roll.Snapshot_for_cycle.get ctxt cycle >>=? fun max_index ->
  Storage.Seed.For_cycle.get ctxt cycle >>=? fun seed ->
  let rd = Seed_repr.initialize_new seed [MBytes.of_string "roll_snapshot"] in
  let seq = Seed_repr.sequence rd 0l in
  let selected_index =
    Seed_repr.take_int32 seq (Int32.of_int max_index) |> fst |> Int32.to_int in
  Storage.Roll.Snapshot_for_cycle.set ctxt cycle selected_index >>=? fun ctxt ->
  fold_left_s
    (fun ctxt index ->
       if Compare.Int.(index = selected_index) then
         return ctxt
       else
         Storage.Roll.Owner.delete_snapshot ctxt (cycle, index) >>= fun ctxt ->
         Storage.Roll.Last_for_snapshot.delete (ctxt, cycle) index >>=? fun ctxt ->
         return ctxt
    )
    ctxt
    Misc.(0 --> (max_index - 1)) >>=? fun ctxt ->
  return ctxt

(* Roll selection *)

module Random = struct

  (* Big-endian serialization of an int32 into 4 bytes. *)
  let int32_to_bytes i =
    let b = MBytes.create 4 in
    MBytes.set_int32 b 0 i;
    b

  (* Derive a level-specific random state from the cycle seed, the
     usage tag ("baking"/"endorsement") and the cycle position. *)
  let level_random seed use level =
    let position = level.Level_repr.cycle_position in
    Seed_repr.initialize_new seed
      [MBytes.of_string ("level "^use^":");
       int32_to_bytes position]

  (* Draw the delegate owning the roll selected for ([level], [offset]);
     re-draws until an owned roll is hit in the frozen snapshot.  Fails
     with [No_roll_snapshot_for_cycle] if the snapshot is missing. *)
  let owner c kind level offset =
    let cycle = level.Level_repr.cycle in
    Seed_storage.for_cycle c cycle >>=? fun random_seed ->
    let rd = level_random random_seed kind level in
    let sequence = Seed_repr.sequence rd (Int32.of_int offset) in
    Storage.Roll.Snapshot_for_cycle.get c cycle >>=? fun index ->
    Storage.Roll.Last_for_snapshot.get (c, cycle) index >>=? fun bound ->
    let rec loop sequence =
      let roll, sequence = Roll_repr.random sequence ~bound in
      Storage.Roll.Owner.Snapshot.get_option c ((cycle, index), roll) >>=? function
      | None ->
          loop sequence
      | Some delegate ->
          return delegate in
    Storage.Roll.Owner.snapshot_exists c (cycle, index) >>= fun snapshot_exists ->
    fail_unless snapshot_exists (No_roll_snapshot_for_cycle cycle) >>=? fun () ->
    loop sequence

end

let baking_rights_owner c level ~priority =
  Random.owner c "baking" level priority

let endorsement_rights_owner c level ~slot =
  Random.owner c "endorsement" level slot

(* Collect the successor chain starting at [head] (head included). *)
let traverse_rolls ctxt head =
  let rec loop acc roll =
    Storage.Roll.Successor.get_option ctxt roll >>=? function
    | None -> return (List.rev acc)
    | Some next -> loop (next :: acc) next in
  loop [head] head

(* All rolls currently owned by [delegate], in list order. *)
let get_rolls ctxt delegate =
  Storage.Roll.Delegate_roll_list.get_option ctxt delegate >>=? function
  | None -> return_nil
  | Some head_roll -> traverse_rolls ctxt head_roll

(* Number of rolls currently owned by [delegate]. *)
let count_rolls ctxt delegate =
  Storage.Roll.Delegate_roll_list.get_option ctxt delegate >>=? function
  | None -> return 0
  | Some head_roll ->
      let rec loop acc roll =
        Storage.Roll.Successor.get_option ctxt roll >>=? function
        | None -> return acc
        | Some next -> loop (succ acc) next in
      loop 1 head_roll

(* Sub-roll change held by [delegate]; zero if never initialized. *)
let get_change c delegate =
  Storage.Roll.Delegate_change.get_option c delegate >>=? function
  | None -> return Tez_repr.zero
  | Some change -> return change

module Delegate = struct

  (* Allocate a brand-new roll id. *)
  let fresh_roll c =
    Storage.Roll.Next.get c >>=? fun roll ->
    Storage.Roll.Next.set c (Roll_repr.succ roll) >>=? fun c ->
    return (roll, c)

  (* Head of the limbo (unowned) roll list, creating one if empty. *)
  let get_limbo_roll c =
    Storage.Roll.Limbo.get_option c >>=? function
    | None ->
        fresh_roll c >>=? fun (roll, c) ->
        Storage.Roll.Limbo.init c roll >>=? fun c ->
        return (roll, c)
    | Some roll ->
        return (roll, c)

  (* Pay for one roll out of the delegate's change; traced as
     [Consume_roll_change] when the change is insufficient. *)
  let consume_roll_change c delegate =
    let tokens_per_roll = Constants_storage.tokens_per_roll c in
    Storage.Roll.Delegate_change.get c delegate >>=? fun change ->
    trace Consume_roll_change
      (Lwt.return Tez_repr.(change -? tokens_per_roll)) >>=? fun new_change ->
    Storage.Roll.Delegate_change.set c delegate new_change

  (* Inverse of [consume_roll_change]: credit one roll's worth back. *)
  let recover_roll_change c delegate =
    let tokens_per_roll = Constants_storage.tokens_per_roll c in
    Storage.Roll.Delegate_change.get c delegate >>=? fun change ->
    Lwt.return Tez_repr.(change +? tokens_per_roll) >>=? fun new_change ->
    Storage.Roll.Delegate_change.set c delegate new_change

  (* Move the delegate's head roll onto the limbo list, refunding its
     price as change.  The diagrams show the list surgery. *)
  let pop_roll_from_delegate c delegate =
    recover_roll_change c delegate >>=? fun c ->
    (* beginning:
       delegate : roll -> successor_roll -> ...
       limbo : limbo_head -> ... *)
    Storage.Roll.Limbo.get_option c >>=? fun limbo_head ->
    Storage.Roll.Delegate_roll_list.get_option c delegate >>=? function
    | None -> fail No_roll_for_delegate
    | Some roll ->
        Storage.Roll.Owner.delete c roll >>=? fun c ->
        Storage.Roll.Successor.get_option c roll >>=? fun successor_roll ->
        Storage.Roll.Delegate_roll_list.set_option c delegate successor_roll >>= fun c ->
        (* delegate : successor_roll -> ...
           roll ------^
           limbo : limbo_head -> ... *)
        Storage.Roll.Successor.set_option c roll limbo_head >>= fun c ->
        (* delegate : successor_roll -> ...
           roll ------v
           limbo : limbo_head -> ... *)
        Storage.Roll.Limbo.init_set c roll >>= fun c ->
        (* delegate : successor_roll -> ...
           limbo : roll -> limbo_head -> ... *)
        return (roll, c)

  (* Take a roll from limbo (or mint one), pay for it from the
     delegate's change, and push it onto the delegate's roll list. *)
  let create_roll_in_delegate c delegate delegate_pk =
    consume_roll_change c delegate >>=? fun c ->
    (* beginning:
       delegate : delegate_head -> ...
       limbo : roll -> limbo_successor -> ... *)
    Storage.Roll.Delegate_roll_list.get_option c delegate >>=? fun delegate_head ->
    get_limbo_roll c >>=? fun (roll, c) ->
    Storage.Roll.Owner.init c roll delegate_pk >>=? fun c ->
    Storage.Roll.Successor.get_option c roll >>=? fun limbo_successor ->
    Storage.Roll.Limbo.set_option c limbo_successor >>= fun c ->
    (* delegate : delegate_head -> ...
       roll ------v
       limbo : limbo_successor -> ... *)
    Storage.Roll.Successor.set_option c roll delegate_head >>= fun c ->
    (* delegate : delegate_head -> ...
       roll ------^
       limbo : limbo_successor -> ... *)
    Storage.Roll.Delegate_roll_list.init_set c delegate roll >>= fun c ->
    (* delegate : roll -> delegate_head -> ...
       limbo : limbo_successor -> ... *)
    return c

  (* Ensure the delegate has a change entry (init to zero if absent). *)
  let ensure_inited c delegate =
    Storage.Roll.Delegate_change.mem c delegate >>= function
    | true -> return c
    | false ->
        Storage.Roll.Delegate_change.init c delegate Tez_repr.zero

  (* A delegate is inactive when explicitly flagged, or when its
     recorded deactivation cycle is in the past. *)
  let is_inactive c delegate =
    Storage.Contract.Inactive_delegate.mem c
      (Contract_repr.implicit_contract delegate) >>= fun inactive ->
    if inactive then
      return inactive
    else
      Storage.Contract.Delegate_desactivation.get_option c
        (Contract_repr.implicit_contract delegate) >>=? function
      | Some last_active_cycle ->
          let { Level_repr.cycle = current_cycle } =
            Raw_context.current_level c in
          return Cycle_repr.(last_active_cycle < current_cycle)
      | None ->
          (* This case is only when called from `set_active`, when
             creating a contract. *)
          return_false

  (* Credit [amount] to the delegate's change and convert as much change
     as possible into rolls (unless the delegate is inactive).  Keeps
     the active-delegates-with-rolls set in sync. *)
  let add_amount c delegate amount =
    ensure_inited c delegate >>=? fun c ->
    let tokens_per_roll = Constants_storage.tokens_per_roll c in
    Storage.Roll.Delegate_change.get c delegate >>=? fun change ->
    Lwt.return Tez_repr.(amount +? change) >>=? fun change ->
    Storage.Roll.Delegate_change.set c delegate change >>=? fun c ->
    delegate_pubkey c delegate >>=? fun delegate_pk ->
    let rec loop c change =
      if Tez_repr.(change < tokens_per_roll) then
        return c
      else
        Lwt.return Tez_repr.(change -? tokens_per_roll) >>=? fun change ->
        create_roll_in_delegate c delegate delegate_pk >>=? fun c ->
        loop c change in
    is_inactive c delegate >>=? fun inactive ->
    if inactive then return c
    else
      loop c change >>=? fun c ->
      Storage.Roll.Delegate_roll_list.get_option c delegate >>=? fun rolls ->
      match rolls with
      | None -> return c
      | Some _ ->
          Storage.Active_delegates_with_rolls.add c delegate >>= fun c ->
          return c

  (* Debit [amount], popping rolls back to limbo as needed to cover it
     (unless inactive, in which case only change is touched). *)
  let remove_amount c delegate amount =
    let tokens_per_roll = Constants_storage.tokens_per_roll c in
    let rec loop c change =
      if Tez_repr.(amount <= change) then
        return (c, change)
      else
        pop_roll_from_delegate c delegate >>=? fun (_, c) ->
        Lwt.return Tez_repr.(change +? tokens_per_roll) >>=? fun change ->
        loop c change in
    Storage.Roll.Delegate_change.get c delegate >>=? fun change ->
    is_inactive c delegate >>=? fun inactive ->
    begin
      if inactive then return (c, change)
      else
        loop c change >>=? fun (c, change) ->
        Storage.Roll.Delegate_roll_list.get_option c delegate >>=? fun rolls ->
        match rolls with
        | None ->
            Storage.Active_delegates_with_rolls.del c delegate >>= fun c ->
            return (c, change)
        | Some _ -> return (c, change)
    end >>=? fun (c, change) ->
    Lwt.return Tez_repr.(change -? amount) >>=? fun change ->
    Storage.Roll.Delegate_change.set c delegate change

  (* Mark the delegate inactive and convert all its rolls into change. *)
  let set_inactive ctxt delegate =
    ensure_inited ctxt delegate >>=? fun ctxt ->
    let tokens_per_roll = Constants_storage.tokens_per_roll ctxt in
    Storage.Roll.Delegate_change.get ctxt delegate >>=? fun change ->
    Storage.Contract.Inactive_delegate.add ctxt
      (Contract_repr.implicit_contract delegate) >>= fun ctxt ->
    Storage.Active_delegates_with_rolls.del ctxt delegate >>= fun ctxt ->
    let rec loop ctxt change =
      Storage.Roll.Delegate_roll_list.get_option ctxt delegate >>=? function
      | None -> return (ctxt, change)
      | Some _roll ->
          pop_roll_from_delegate ctxt delegate >>=? fun (_, ctxt) ->
          Lwt.return Tez_repr.(change +? tokens_per_roll) >>=? fun change ->
          loop ctxt change in
    loop ctxt change >>=? fun (ctxt, change) ->
    Storage.Roll.Delegate_change.set ctxt delegate change >>=? fun ctxt ->
    return ctxt

  (* (Re)activate the delegate, extend its deactivation deadline, and,
     if it was inactive, rebuild its rolls from its change. *)
  let set_active ctxt delegate =
    is_inactive ctxt delegate >>=? fun inactive ->
    let current_cycle = (Raw_context.current_level ctxt).cycle in
    let preserved_cycles = Constants_storage.preserved_cycles ctxt in
    (* When the delegate is new or inactive, she will become active in
       `1+preserved_cycles`, and we allow `preserved_cycles` for the
       delegate to start baking. When the delegate is active, we only
       give her at least `preserved_cycles` after the current cycle
       before to be deactivated. *)
    Storage.Contract.Delegate_desactivation.get_option ctxt
      (Contract_repr.implicit_contract delegate) >>=? fun current_expiration ->
    let expiration = match current_expiration with
      | None ->
          Cycle_repr.add current_cycle (1+2*preserved_cycles)
      | Some current_expiration ->
          let delay =
            if inactive then (1+2*preserved_cycles) else 1+preserved_cycles in
          let updated =
            Cycle_repr.add current_cycle delay in
          Cycle_repr.max current_expiration updated in
    Storage.Contract.Delegate_desactivation.init_set ctxt
      (Contract_repr.implicit_contract delegate)
      expiration >>= fun ctxt ->
    if not inactive then
      return ctxt
    else begin
      ensure_inited ctxt delegate >>=? fun ctxt ->
      let tokens_per_roll = Constants_storage.tokens_per_roll ctxt in
      Storage.Roll.Delegate_change.get ctxt delegate >>=? fun change ->
      Storage.Contract.Inactive_delegate.del ctxt
        (Contract_repr.implicit_contract delegate) >>= fun ctxt ->
      delegate_pubkey ctxt delegate >>=? fun delegate_pk ->
      let rec loop ctxt change =
        if Tez_repr.(change < tokens_per_roll) then
          return ctxt
        else
          Lwt.return Tez_repr.(change -? tokens_per_roll) >>=? fun change ->
          create_roll_in_delegate ctxt delegate delegate_pk >>=? fun ctxt ->
          loop ctxt change in
      loop ctxt change >>=? fun ctxt ->
      Storage.Roll.Delegate_roll_list.get_option ctxt delegate >>=? fun rolls ->
      match rolls with
      | None -> return ctxt
      | Some _ ->
          Storage.Active_delegates_with_rolls.add ctxt delegate >>= fun ctxt ->
          return ctxt
    end

end

(* Contract-level wrappers: route balance changes to the contract's
   delegate, if any. *)
module Contract = struct

  let add_amount c contract amount =
    get_contract_delegate c contract >>=? function
    | None -> return c
    | Some delegate ->
        Delegate.add_amount c delegate amount

  let remove_amount c contract amount =
    get_contract_delegate c contract >>=? function
    | None -> return c
    | Some delegate ->
        Delegate.remove_amount c delegate amount

end

let init ctxt =
  Storage.Roll.Next.init ctxt Roll_repr.first

(* Bootstrap: snapshot and freeze rolls for the first preserved cycles,
   and prepare snapshot storage for the two cycles after them. *)
let init_first_cycles ctxt =
  let preserved = Constants_storage.preserved_cycles ctxt in
  (* Precompute rolls for cycle (0 --> preserved_cycles) *)
  List.fold_left
    (fun ctxt c ->
       ctxt >>=? fun ctxt ->
       let cycle = Cycle_repr.of_int32_exn (Int32.of_int c) in
       Storage.Roll.Snapshot_for_cycle.init ctxt cycle 0 >>=? fun ctxt ->
       snapshot_rolls_for_cycle ctxt cycle >>=? fun ctxt ->
       freeze_rolls_for_cycle ctxt cycle)
    (return ctxt) (0 --> preserved) >>=? fun ctxt ->
  let cycle = Cycle_repr.of_int32_exn (Int32.of_int (preserved + 1)) in
  (* Precomputed a snapshot for cycle (preserved_cycles + 1) *)
  Storage.Roll.Snapshot_for_cycle.init ctxt cycle 0 >>=? fun ctxt ->
  snapshot_rolls_for_cycle ctxt cycle >>=? fun ctxt ->
  (* Prepare storage for storing snapshots for cycle (preserved_cycles+2) *)
  let cycle = Cycle_repr.of_int32_exn (Int32.of_int (preserved + 2)) in
  Storage.Roll.Snapshot_for_cycle.init ctxt cycle 0 >>=? fun ctxt ->
  return ctxt

(* Take the periodic snapshot for cycle (current + preserved + 2). *)
let snapshot_rolls ctxt =
  let current_level = Raw_context.current_level ctxt in
  let preserved = Constants_storage.preserved_cycles ctxt in
  let cycle = Cycle_repr.add current_level.cycle (preserved+2) in
  snapshot_rolls_for_cycle ctxt cycle

(* End-of-cycle bookkeeping: drop the snapshot that fell out of the
   preserved window, freeze the upcoming cycle's snapshot, and prepare
   storage for the next one. *)
let cycle_end ctxt last_cycle =
  let preserved = Constants_storage.preserved_cycles ctxt in
  begin
    match Cycle_repr.sub last_cycle preserved with
    | None -> return ctxt
    | Some cleared_cycle ->
        clear_cycle ctxt cleared_cycle
  end >>=? fun ctxt ->
  let frozen_roll_cycle = Cycle_repr.add last_cycle (preserved+1) in
  freeze_rolls_for_cycle ctxt frozen_roll_cycle >>=? fun ctxt ->
  Storage.Roll.Snapshot_for_cycle.init ctxt
    (Cycle_repr.succ (Cycle_repr.succ frozen_roll_cycle)) 0 >>=? fun ctxt ->
  return ctxt

(* Patch [tokens_per_roll] and rebalance every delegate by the
   per-roll difference so roll counts stay consistent. *)
let update_tokens_per_roll ctxt new_tokens_per_roll =
  let constants = Raw_context.constants ctxt in
  let old_tokens_per_roll = constants.tokens_per_roll in
  Raw_context.patch_constants ctxt begin fun constants ->
    { constants with
      Constants_repr.tokens_per_roll = new_tokens_per_roll }
  end >>= fun ctxt ->
  let decrease = Tez_repr.(new_tokens_per_roll < old_tokens_per_roll) in
  begin
    if decrease then
      Lwt.return Tez_repr.(old_tokens_per_roll -? new_tokens_per_roll)
    else
      Lwt.return Tez_repr.(new_tokens_per_roll -? old_tokens_per_roll)
  end >>=? fun abs_diff ->
  Storage.Delegates.fold ctxt (Ok ctxt) begin fun pkh ctxt ->
    Lwt.return ctxt >>=? fun ctxt ->
    count_rolls ctxt pkh >>=? fun rolls ->
    Lwt.return Tez_repr.(abs_diff *? Int64.of_int rolls) >>=? fun amount ->
    if decrease then Delegate.add_amount ctxt pkh amount
    else Delegate.remove_amount ctxt pkh amount
  end
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. <contact@tezos.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
/* fmgr.h */
#ifndef FMGR_H #define FMGR_H /* We don't want to include primnodes.h here, so make some stub references */ typedef struct Node *fmNodePtr; typedef struct Aggref *fmAggrefPtr; /* Likewise, avoid including execnodes.h here */ typedef void (*fmExprContextCallbackFunction) (Datum arg); /* Likewise, avoid including stringinfo.h here */ typedef struct StringInfoData *fmStringInfo; /* * All functions that can be called directly by fmgr must have this signature. * (Other functions can be called by using a handler that does have this * signature.) */ typedef struct FunctionCallInfoBaseData *FunctionCallInfo; typedef Datum (*PGFunction) (FunctionCallInfo fcinfo); /* * This struct holds the system-catalog information that must be looked up * before a function can be called through fmgr. If the same function is * to be called multiple times, the lookup need be done only once and the * info struct saved for re-use. * * Note that fn_expr really is parse-time-determined information about the * arguments, rather than about the function itself. But it's convenient to * store it here rather than in FunctionCallInfoBaseData, where it might more * logically belong. * * fn_extra is available for use by the called function; all other fields * should be treated as read-only after the struct is created. */ typedef struct FmgrInfo { PGFunction fn_addr; /* pointer to function or handler to be called */ Oid fn_oid; /* OID of function (NOT of handler, if any) */ short fn_nargs; /* number of input args (0..FUNC_MAX_ARGS) */ bool fn_strict; /* function is "strict" (NULL in => NULL out) */ bool fn_retset; /* function returns a set */ unsigned char fn_stats; /* collect stats if track_functions > this */ void *fn_extra; /* extra space for use by handler */ MemoryContext fn_mcxt; /* memory context to store fn_extra in */ fmNodePtr fn_expr; /* expression parse tree for call, or NULL */ } FmgrInfo; /* * This struct is the data actually passed to an fmgr-called function. 
* * The called function is expected to set isnull, and possibly resultinfo or * fields in whatever resultinfo points to. It should not change any other * fields. (In particular, scribbling on the argument arrays is a bad idea, * since some callers assume they can re-call with the same arguments.) * * Note that enough space for arguments needs to be provided, either by using * SizeForFunctionCallInfo() in dynamic allocations, or by using * LOCAL_FCINFO() for on-stack allocations. * * This struct is named *BaseData, rather than *Data, to break pre v12 code * that allocated FunctionCallInfoData itself, as it'd often silently break * old code due to no space for arguments being provided. */ typedef struct FunctionCallInfoBaseData { FmgrInfo *flinfo; /* ptr to lookup info used for this call */ fmNodePtr context; /* pass info about context of call */ fmNodePtr resultinfo; /* pass or return extra info about result */ Oid fncollation; /* collation for function to use */ #define FIELDNO_FUNCTIONCALLINFODATA_ISNULL 4 bool isnull; /* function must set true if result is NULL */ short nargs; /* # arguments actually passed */ #define FIELDNO_FUNCTIONCALLINFODATA_ARGS 6 NullableDatum args[FLEXIBLE_ARRAY_MEMBER]; } FunctionCallInfoBaseData; /* * Space needed for a FunctionCallInfoBaseData struct with sufficient space * for `nargs` arguments. */ #define SizeForFunctionCallInfo(nargs) \ (offsetof(FunctionCallInfoBaseData, args) + \ sizeof(NullableDatum) * (nargs)) /* * This macro ensures that `name` points to a stack-allocated * FunctionCallInfoBaseData struct with sufficient space for `nargs` arguments. 
*/ #define LOCAL_FCINFO(name, nargs) \ /* use union with FunctionCallInfoBaseData to guarantee alignment */ \ union \ { \ FunctionCallInfoBaseData fcinfo; \ /* ensure enough space for nargs args is available */ \ char fcinfo_data[SizeForFunctionCallInfo(nargs)]; \ } name##data; \ FunctionCallInfo name = &name##data.fcinfo /* * This routine fills a FmgrInfo struct, given the OID * of the function to be called. */ extern void fmgr_info(Oid functionId, FmgrInfo *finfo); /* * Same, when the FmgrInfo struct is in a memory context longer-lived than * CurrentMemoryContext. The specified context will be set as fn_mcxt * and used to hold all subsidiary data of finfo. */ extern void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt); /* Convenience macro for setting the fn_expr field */ #define fmgr_info_set_expr(expr, finfo) \ ((finfo)->fn_expr = (expr)) /* * Copy an FmgrInfo struct */ extern void fmgr_info_copy(FmgrInfo *dstinfo, FmgrInfo *srcinfo, MemoryContext destcxt); extern void fmgr_symbol(Oid functionId, char **mod, char **fn); /* * This macro initializes all the fields of a FunctionCallInfoBaseData except * for the args[] array. */ #define InitFunctionCallInfoData(Fcinfo, Flinfo, Nargs, Collation, Context, Resultinfo) \ do { \ (Fcinfo).flinfo = (Flinfo); \ (Fcinfo).context = (Context); \ (Fcinfo).resultinfo = (Resultinfo); \ (Fcinfo).fncollation = (Collation); \ (Fcinfo).isnull = false; \ (Fcinfo).nargs = (Nargs); \ } while (0) /* * This macro invokes a function given a filled-in FunctionCallInfoBaseData * struct. The macro result is the returned Datum --- but note that * caller must still check fcinfo->isnull! Also, if function is strict, * it is caller's responsibility to verify that no null arguments are present * before calling. * * Some code performs multiple calls without redoing InitFunctionCallInfoData, * possibly altering the argument values. 
This is okay, but be sure to reset * the fcinfo->isnull flag before each call, since callees are permitted to * assume that starts out false. */ #define FunctionCallInvoke(fcinfo) ((* (fcinfo)->flinfo->fn_addr) (fcinfo)) /*------------------------------------------------------------------------- * Support macros to ease writing fmgr-compatible functions * * A C-coded fmgr-compatible function should be declared as * * Datum * function_name(PG_FUNCTION_ARGS) * { * ... * } * * It should access its arguments using appropriate PG_GETARG_xxx macros * and should return its result using PG_RETURN_xxx. * *------------------------------------------------------------------------- */ /* Standard parameter list for fmgr-compatible functions */ #define PG_FUNCTION_ARGS FunctionCallInfo fcinfo /* * Get collation function should use. */ #define PG_GET_COLLATION() (fcinfo->fncollation) /* * Get number of arguments passed to function. */ #define PG_NARGS() (fcinfo->nargs) /* * If function is not marked "proisstrict" in pg_proc, it must check for * null arguments using this macro. Do not try to GETARG a null argument! */ #define PG_ARGISNULL(n) (fcinfo->args[n].isnull) /* * Support for fetching detoasted copies of toastable datatypes (all of * which are varlena types). pg_detoast_datum() gives you either the input * datum (if not toasted) or a detoasted copy allocated with palloc(). * pg_detoast_datum_copy() always gives you a palloc'd copy --- use it * if you need a modifiable copy of the input. Caller is expected to have * checked for null inputs first, if necessary. * * pg_detoast_datum_packed() will return packed (1-byte header) datums * unmodified. It will still expand an externally toasted or compressed datum. * The resulting datum can be accessed using VARSIZE_ANY() and VARDATA_ANY() * (beware of multiple evaluations in those macros!) * * In consumers oblivious to data alignment, call PG_DETOAST_DATUM_PACKED(), * VARDATA_ANY(), VARSIZE_ANY() and VARSIZE_ANY_EXHDR(). 
Elsewhere, call * PG_DETOAST_DATUM(), VARDATA() and VARSIZE(). Directly fetching an int16, * int32 or wider field in the struct representing the datum layout requires * aligned data. memcpy() is alignment-oblivious, as are most operations on * datatypes, such as text, whose layout struct contains only char fields. * * Note: it'd be nice if these could be macros, but I see no way to do that * without evaluating the arguments multiple times, which is NOT acceptable. */ extern struct varlena *pg_detoast_datum(struct varlena *datum); extern struct varlena *pg_detoast_datum_copy(struct varlena *datum); extern struct varlena *pg_detoast_datum_slice(struct varlena *datum, int32 first, int32 count); extern struct varlena *pg_detoast_datum_packed(struct varlena *datum); #define PG_DETOAST_DATUM(datum) \ pg_detoast_datum((struct varlena *) DatumGetPointer(datum)) #define PG_DETOAST_DATUM_COPY(datum) \ pg_detoast_datum_copy((struct varlena *) DatumGetPointer(datum)) #define PG_DETOAST_DATUM_SLICE(datum,f,c) \ pg_detoast_datum_slice((struct varlena *) DatumGetPointer(datum), \ (int32) (f), (int32) (c)) /* WARNING -- unaligned pointer */ #define PG_DETOAST_DATUM_PACKED(datum) \ pg_detoast_datum_packed((struct varlena *) DatumGetPointer(datum)) /* * Support for cleaning up detoasted copies of inputs. This must only * be used for pass-by-ref datatypes, and normally would only be used * for toastable types. If the given pointer is different from the * original argument, assume it's a palloc'd detoasted copy, and pfree it. * NOTE: most functions on toastable types do not have to worry about this, * but we currently require that support functions for indexes not leak * memory. 
*/ #define PG_FREE_IF_COPY(ptr,n) \ do { \ if ((Pointer) (ptr) != PG_GETARG_POINTER(n)) \ pfree(ptr); \ } while (0) /* Macros for fetching arguments of standard types */ #define PG_GETARG_DATUM(n) (fcinfo->args[n].value) #define PG_GETARG_INT32(n) DatumGetInt32(PG_GETARG_DATUM(n)) #define PG_GETARG_UINT32(n) DatumGetUInt32(PG_GETARG_DATUM(n)) #define PG_GETARG_INT16(n) DatumGetInt16(PG_GETARG_DATUM(n)) #define PG_GETARG_UINT16(n) DatumGetUInt16(PG_GETARG_DATUM(n)) #define PG_GETARG_CHAR(n) DatumGetChar(PG_GETARG_DATUM(n)) #define PG_GETARG_BOOL(n) DatumGetBool(PG_GETARG_DATUM(n)) #define PG_GETARG_OID(n) DatumGetObjectId(PG_GETARG_DATUM(n)) #define PG_GETARG_POINTER(n) DatumGetPointer(PG_GETARG_DATUM(n)) #define PG_GETARG_CSTRING(n) DatumGetCString(PG_GETARG_DATUM(n)) #define PG_GETARG_NAME(n) DatumGetName(PG_GETARG_DATUM(n)) /* these macros hide the pass-by-reference-ness of the datatype: */ #define PG_GETARG_FLOAT4(n) DatumGetFloat4(PG_GETARG_DATUM(n)) #define PG_GETARG_FLOAT8(n) DatumGetFloat8(PG_GETARG_DATUM(n)) #define PG_GETARG_INT64(n) DatumGetInt64(PG_GETARG_DATUM(n)) /* use this if you want the raw, possibly-toasted input datum: */ #define PG_GETARG_RAW_VARLENA_P(n) ((struct varlena *) PG_GETARG_POINTER(n)) /* use this if you want the input datum de-toasted: */ #define PG_GETARG_VARLENA_P(n) PG_DETOAST_DATUM(PG_GETARG_DATUM(n)) /* and this if you can handle 1-byte-header datums: */ #define PG_GETARG_VARLENA_PP(n) PG_DETOAST_DATUM_PACKED(PG_GETARG_DATUM(n)) /* DatumGetFoo macros for varlena types will typically look like this: */ #define DatumGetByteaPP(X) ((bytea *) PG_DETOAST_DATUM_PACKED(X)) #define DatumGetTextPP(X) ((text *) PG_DETOAST_DATUM_PACKED(X)) #define DatumGetBpCharPP(X) ((BpChar *) PG_DETOAST_DATUM_PACKED(X)) #define DatumGetVarCharPP(X) ((VarChar *) PG_DETOAST_DATUM_PACKED(X)) #define DatumGetHeapTupleHeader(X) ((HeapTupleHeader) PG_DETOAST_DATUM(X)) /* And we also offer variants that return an OK-to-write copy */ #define 
DatumGetByteaPCopy(X) ((bytea *) PG_DETOAST_DATUM_COPY(X)) #define DatumGetTextPCopy(X) ((text *) PG_DETOAST_DATUM_COPY(X)) #define DatumGetBpCharPCopy(X) ((BpChar *) PG_DETOAST_DATUM_COPY(X)) #define DatumGetVarCharPCopy(X) ((VarChar *) PG_DETOAST_DATUM_COPY(X)) #define DatumGetHeapTupleHeaderCopy(X) ((HeapTupleHeader) PG_DETOAST_DATUM_COPY(X)) /* Variants which return n bytes starting at pos. m */ #define DatumGetByteaPSlice(X,m,n) ((bytea *) PG_DETOAST_DATUM_SLICE(X,m,n)) #define DatumGetTextPSlice(X,m,n) ((text *) PG_DETOAST_DATUM_SLICE(X,m,n)) #define DatumGetBpCharPSlice(X,m,n) ((BpChar *) PG_DETOAST_DATUM_SLICE(X,m,n)) #define DatumGetVarCharPSlice(X,m,n) ((VarChar *) PG_DETOAST_DATUM_SLICE(X,m,n)) /* GETARG macros for varlena types will typically look like this: */ #define PG_GETARG_BYTEA_PP(n) DatumGetByteaPP(PG_GETARG_DATUM(n)) #define PG_GETARG_TEXT_PP(n) DatumGetTextPP(PG_GETARG_DATUM(n)) #define PG_GETARG_BPCHAR_PP(n) DatumGetBpCharPP(PG_GETARG_DATUM(n)) #define PG_GETARG_VARCHAR_PP(n) DatumGetVarCharPP(PG_GETARG_DATUM(n)) #define PG_GETARG_HEAPTUPLEHEADER(n) DatumGetHeapTupleHeader(PG_GETARG_DATUM(n)) /* And we also offer variants that return an OK-to-write copy */ #define PG_GETARG_BYTEA_P_COPY(n) DatumGetByteaPCopy(PG_GETARG_DATUM(n)) #define PG_GETARG_TEXT_P_COPY(n) DatumGetTextPCopy(PG_GETARG_DATUM(n)) #define PG_GETARG_BPCHAR_P_COPY(n) DatumGetBpCharPCopy(PG_GETARG_DATUM(n)) #define PG_GETARG_VARCHAR_P_COPY(n) DatumGetVarCharPCopy(PG_GETARG_DATUM(n)) #define PG_GETARG_HEAPTUPLEHEADER_COPY(n) DatumGetHeapTupleHeaderCopy(PG_GETARG_DATUM(n)) /* And a b-byte slice from position a -also OK to write */ #define PG_GETARG_BYTEA_P_SLICE(n,a,b) DatumGetByteaPSlice(PG_GETARG_DATUM(n),a,b) #define PG_GETARG_TEXT_P_SLICE(n,a,b) DatumGetTextPSlice(PG_GETARG_DATUM(n),a,b) #define PG_GETARG_BPCHAR_P_SLICE(n,a,b) DatumGetBpCharPSlice(PG_GETARG_DATUM(n),a,b) #define PG_GETARG_VARCHAR_P_SLICE(n,a,b) DatumGetVarCharPSlice(PG_GETARG_DATUM(n),a,b) /* * Obsolescent 
variants that guarantee INT alignment for the return value. * Few operations on these particular types need alignment, mainly operations * that cast the VARDATA pointer to a type like int16[]. Most code should use * the ...PP(X) counterpart. Nonetheless, these appear frequently in code * predating the PostgreSQL 8.3 introduction of the ...PP(X) variants. */ #define DatumGetByteaP(X) ((bytea *) PG_DETOAST_DATUM(X)) #define DatumGetTextP(X) ((text *) PG_DETOAST_DATUM(X)) #define DatumGetBpCharP(X) ((BpChar *) PG_DETOAST_DATUM(X)) #define DatumGetVarCharP(X) ((VarChar *) PG_DETOAST_DATUM(X)) #define PG_GETARG_BYTEA_P(n) DatumGetByteaP(PG_GETARG_DATUM(n)) #define PG_GETARG_TEXT_P(n) DatumGetTextP(PG_GETARG_DATUM(n)) #define PG_GETARG_BPCHAR_P(n) DatumGetBpCharP(PG_GETARG_DATUM(n)) #define PG_GETARG_VARCHAR_P(n) DatumGetVarCharP(PG_GETARG_DATUM(n)) /* To access options from opclass support functions use this: */ #define PG_HAS_OPCLASS_OPTIONS() has_fn_opclass_options(fcinfo->flinfo) #define PG_GET_OPCLASS_OPTIONS() get_fn_opclass_options(fcinfo->flinfo) /* To return a NULL do this: */ #define PG_RETURN_NULL() \ do { fcinfo->isnull = true; return (Datum) 0; } while (0) /* A few internal functions return void (which is not the same as NULL!) 
*/ #define PG_RETURN_VOID() return (Datum) 0 /* Macros for returning results of standard types */ #define PG_RETURN_DATUM(x) return (x) #define PG_RETURN_INT32(x) return Int32GetDatum(x) #define PG_RETURN_UINT32(x) return UInt32GetDatum(x) #define PG_RETURN_INT16(x) return Int16GetDatum(x) #define PG_RETURN_UINT16(x) return UInt16GetDatum(x) #define PG_RETURN_CHAR(x) return CharGetDatum(x) #define PG_RETURN_BOOL(x) return BoolGetDatum(x) #define PG_RETURN_OID(x) return ObjectIdGetDatum(x) #define PG_RETURN_POINTER(x) return PointerGetDatum(x) #define PG_RETURN_CSTRING(x) return CStringGetDatum(x) #define PG_RETURN_NAME(x) return NameGetDatum(x) /* these macros hide the pass-by-reference-ness of the datatype: */ #define PG_RETURN_FLOAT4(x) return Float4GetDatum(x) #define PG_RETURN_FLOAT8(x) return Float8GetDatum(x) #define PG_RETURN_INT64(x) return Int64GetDatum(x) #define PG_RETURN_UINT64(x) return UInt64GetDatum(x) /* RETURN macros for other pass-by-ref types will typically look like this: */ #define PG_RETURN_BYTEA_P(x) PG_RETURN_POINTER(x) #define PG_RETURN_TEXT_P(x) PG_RETURN_POINTER(x) #define PG_RETURN_BPCHAR_P(x) PG_RETURN_POINTER(x) #define PG_RETURN_VARCHAR_P(x) PG_RETURN_POINTER(x) #define PG_RETURN_HEAPTUPLEHEADER(x) return HeapTupleHeaderGetDatum(x) /*------------------------------------------------------------------------- * Support for detecting call convention of dynamically-loaded functions * * Dynamically loaded functions currently can only use the version-1 ("new * style") calling convention. Version-0 ("old style") is not supported * anymore. Version 1 is the call convention defined in this header file, and * must be accompanied by the macro call * * PG_FUNCTION_INFO_V1(function_name); * * Note that internal functions do not need this decoration since they are * assumed to be version-1. 
* *------------------------------------------------------------------------- */ typedef struct { int api_version; /* specifies call convention version number */ /* More fields may be added later, for version numbers > 1. */ } Pg_finfo_record; /* Expected signature of an info function */ typedef const Pg_finfo_record *(*PGFInfoFunction) (void); /* * Macro to build an info function associated with the given function name. * * As a convenience, also provide an "extern" declaration for the given * function name, so that writers of C functions need not write that too. * * On Windows, the function and info function must be exported. Our normal * build processes take care of that via .DEF files or --export-all-symbols. * Module authors using a different build process might need to manually * declare the function PGDLLEXPORT. We do that automatically here for the * info function, since authors shouldn't need to be explicitly aware of it. */ #define PG_FUNCTION_INFO_V1(funcname) \ extern Datum funcname(PG_FUNCTION_ARGS); \ extern PGDLLEXPORT const Pg_finfo_record * CppConcat(pg_finfo_,funcname)(void); \ const Pg_finfo_record * \ CppConcat(pg_finfo_,funcname) (void) \ { \ static const Pg_finfo_record my_finfo = { 1 }; \ return &my_finfo; \ } \ extern int no_such_variable /*------------------------------------------------------------------------- * Support for verifying backend compatibility of loaded modules * * We require dynamically-loaded modules to include the macro call * PG_MODULE_MAGIC; * so that we can check for obvious incompatibility, such as being compiled * for a different major PostgreSQL version. * * To compile with versions of PostgreSQL that do not support this, * you may put an #ifdef/#endif test around it. Note that in a multiple- * source-file module, the macro call should only appear once. 
* * The specific items included in the magic block are intended to be ones that * are custom-configurable and especially likely to break dynamically loaded * modules if they were compiled with other values. Also, the length field * can be used to detect definition changes. * * Note: we compare magic blocks with memcmp(), so there had better not be * any alignment pad bytes in them. * * Note: when changing the contents of magic blocks, be sure to adjust the * incompatible_module_error() function in dfmgr.c. *------------------------------------------------------------------------- */ /* Definition of the magic block structure */ typedef struct { int len; /* sizeof(this struct) */ int version; /* PostgreSQL major version */ int funcmaxargs; /* FUNC_MAX_ARGS */ int indexmaxkeys; /* INDEX_MAX_KEYS */ int namedatalen; /* NAMEDATALEN */ int float8byval; /* FLOAT8PASSBYVAL */ } Pg_magic_struct; /* The actual data block contents */ #define PG_MODULE_MAGIC_DATA \ { \ sizeof(Pg_magic_struct), \ PG_VERSION_NUM / 100, \ FUNC_MAX_ARGS, \ INDEX_MAX_KEYS, \ NAMEDATALEN, \ FLOAT8PASSBYVAL \ } /* * Declare the module magic function. 
It needs to be a function as the dlsym * in the backend is only guaranteed to work on functions, not data */ typedef const Pg_magic_struct *(*PGModuleMagicFunction) (void); #define PG_MAGIC_FUNCTION_NAME Pg_magic_func #define PG_MAGIC_FUNCTION_NAME_STRING "Pg_magic_func" #define PG_MODULE_MAGIC \ extern PGDLLEXPORT const Pg_magic_struct *PG_MAGIC_FUNCTION_NAME(void); \ const Pg_magic_struct * \ PG_MAGIC_FUNCTION_NAME(void) \ { \ static const Pg_magic_struct Pg_magic_data = PG_MODULE_MAGIC_DATA; \ return &Pg_magic_data; \ } \ extern int no_such_variable /*------------------------------------------------------------------------- * Support routines and macros for callers of fmgr-compatible functions *------------------------------------------------------------------------- */ /* These are for invocation of a specifically named function with a * directly-computed parameter list. Note that neither arguments nor result * are allowed to be NULL. Also, the function cannot be one that needs to * look at FmgrInfo, since there won't be any. 
*/ extern Datum DirectFunctionCall1Coll(PGFunction func, Oid collation, Datum arg1); extern Datum DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2); extern Datum DirectFunctionCall3Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3); extern Datum DirectFunctionCall4Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4); extern Datum DirectFunctionCall5Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5); extern Datum DirectFunctionCall6Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6); extern Datum DirectFunctionCall7Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, Datum arg7); extern Datum DirectFunctionCall8Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, Datum arg7, Datum arg8); extern Datum DirectFunctionCall9Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, Datum arg7, Datum arg8, Datum arg9); /* * These functions work like the DirectFunctionCall functions except that * they use the flinfo parameter to initialise the fcinfo for the call. * It's recommended that the callee only use the fn_extra and fn_mcxt * fields, as other fields will typically describe the calling function * not the callee. Conversely, the calling function should not have * used fn_extra, unless its use is known to be compatible with the callee's. */ extern Datum CallerFInfoFunctionCall1(PGFunction func, FmgrInfo *flinfo, Oid collation, Datum arg1); extern Datum CallerFInfoFunctionCall2(PGFunction func, FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2); /* These are for invocation of a previously-looked-up function with a * directly-computed parameter list. Note that neither arguments nor result * are allowed to be NULL. 
*/ extern Datum FunctionCall0Coll(FmgrInfo *flinfo, Oid collation); extern Datum FunctionCall1Coll(FmgrInfo *flinfo, Oid collation, Datum arg1); extern Datum FunctionCall2Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2); extern Datum FunctionCall3Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2, Datum arg3); extern Datum FunctionCall4Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4); extern Datum FunctionCall5Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5); extern Datum FunctionCall6Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6); extern Datum FunctionCall7Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, Datum arg7); extern Datum FunctionCall8Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, Datum arg7, Datum arg8); extern Datum FunctionCall9Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, Datum arg7, Datum arg8, Datum arg9); /* These are for invocation of a function identified by OID with a * directly-computed parameter list. Note that neither arguments nor result * are allowed to be NULL. These are essentially fmgr_info() followed by * FunctionCallN(). If the same function is to be invoked repeatedly, do the * fmgr_info() once and then use FunctionCallN(). 
*/ extern Datum OidFunctionCall0Coll(Oid functionId, Oid collation); extern Datum OidFunctionCall1Coll(Oid functionId, Oid collation, Datum arg1); extern Datum OidFunctionCall2Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2); extern Datum OidFunctionCall3Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2, Datum arg3); extern Datum OidFunctionCall4Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4); extern Datum OidFunctionCall5Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5); extern Datum OidFunctionCall6Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6); extern Datum OidFunctionCall7Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, Datum arg7); extern Datum OidFunctionCall8Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, Datum arg7, Datum arg8); extern Datum OidFunctionCall9Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, Datum arg7, Datum arg8, Datum arg9); /* These macros allow the collation argument to be omitted (with a default of * InvalidOid, ie, no collation). They exist mostly for backwards * compatibility of source code. 
*/ #define DirectFunctionCall1(func, arg1) \ DirectFunctionCall1Coll(func, InvalidOid, arg1) #define DirectFunctionCall2(func, arg1, arg2) \ DirectFunctionCall2Coll(func, InvalidOid, arg1, arg2) #define DirectFunctionCall3(func, arg1, arg2, arg3) \ DirectFunctionCall3Coll(func, InvalidOid, arg1, arg2, arg3) #define DirectFunctionCall4(func, arg1, arg2, arg3, arg4) \ DirectFunctionCall4Coll(func, InvalidOid, arg1, arg2, arg3, arg4) #define DirectFunctionCall5(func, arg1, arg2, arg3, arg4, arg5) \ DirectFunctionCall5Coll(func, InvalidOid, arg1, arg2, arg3, arg4, arg5) #define DirectFunctionCall6(func, arg1, arg2, arg3, arg4, arg5, arg6) \ DirectFunctionCall6Coll(func, InvalidOid, arg1, arg2, arg3, arg4, arg5, arg6) #define DirectFunctionCall7(func, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \ DirectFunctionCall7Coll(func, InvalidOid, arg1, arg2, arg3, arg4, arg5, arg6, arg7) #define DirectFunctionCall8(func, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \ DirectFunctionCall8Coll(func, InvalidOid, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) #define DirectFunctionCall9(func, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) \ DirectFunctionCall9Coll(func, InvalidOid, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) #define FunctionCall1(flinfo, arg1) \ FunctionCall1Coll(flinfo, InvalidOid, arg1) #define FunctionCall2(flinfo, arg1, arg2) \ FunctionCall2Coll(flinfo, InvalidOid, arg1, arg2) #define FunctionCall3(flinfo, arg1, arg2, arg3) \ FunctionCall3Coll(flinfo, InvalidOid, arg1, arg2, arg3) #define FunctionCall4(flinfo, arg1, arg2, arg3, arg4) \ FunctionCall4Coll(flinfo, InvalidOid, arg1, arg2, arg3, arg4) #define FunctionCall5(flinfo, arg1, arg2, arg3, arg4, arg5) \ FunctionCall5Coll(flinfo, InvalidOid, arg1, arg2, arg3, arg4, arg5) #define FunctionCall6(flinfo, arg1, arg2, arg3, arg4, arg5, arg6) \ FunctionCall6Coll(flinfo, InvalidOid, arg1, arg2, arg3, arg4, arg5, arg6) #define FunctionCall7(flinfo, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \ 
FunctionCall7Coll(flinfo, InvalidOid, arg1, arg2, arg3, arg4, arg5, arg6, arg7) #define FunctionCall8(flinfo, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \ FunctionCall8Coll(flinfo, InvalidOid, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) #define FunctionCall9(flinfo, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) \ FunctionCall9Coll(flinfo, InvalidOid, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) #define OidFunctionCall0(functionId) \ OidFunctionCall0Coll(functionId, InvalidOid) #define OidFunctionCall1(functionId, arg1) \ OidFunctionCall1Coll(functionId, InvalidOid, arg1) #define OidFunctionCall2(functionId, arg1, arg2) \ OidFunctionCall2Coll(functionId, InvalidOid, arg1, arg2) #define OidFunctionCall3(functionId, arg1, arg2, arg3) \ OidFunctionCall3Coll(functionId, InvalidOid, arg1, arg2, arg3) #define OidFunctionCall4(functionId, arg1, arg2, arg3, arg4) \ OidFunctionCall4Coll(functionId, InvalidOid, arg1, arg2, arg3, arg4) #define OidFunctionCall5(functionId, arg1, arg2, arg3, arg4, arg5) \ OidFunctionCall5Coll(functionId, InvalidOid, arg1, arg2, arg3, arg4, arg5) #define OidFunctionCall6(functionId, arg1, arg2, arg3, arg4, arg5, arg6) \ OidFunctionCall6Coll(functionId, InvalidOid, arg1, arg2, arg3, arg4, arg5, arg6) #define OidFunctionCall7(functionId, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \ OidFunctionCall7Coll(functionId, InvalidOid, arg1, arg2, arg3, arg4, arg5, arg6, arg7) #define OidFunctionCall8(functionId, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \ OidFunctionCall8Coll(functionId, InvalidOid, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) #define OidFunctionCall9(functionId, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) \ OidFunctionCall9Coll(functionId, InvalidOid, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) /* Special cases for convenient invocation of datatype I/O functions. 
*/ extern Datum InputFunctionCall(FmgrInfo *flinfo, char *str, Oid typioparam, int32 typmod); extern Datum OidInputFunctionCall(Oid functionId, char *str, Oid typioparam, int32 typmod); extern char *OutputFunctionCall(FmgrInfo *flinfo, Datum val); extern char *OidOutputFunctionCall(Oid functionId, Datum val); extern Datum ReceiveFunctionCall(FmgrInfo *flinfo, fmStringInfo buf, Oid typioparam, int32 typmod); extern Datum OidReceiveFunctionCall(Oid functionId, fmStringInfo buf, Oid typioparam, int32 typmod); extern bytea *SendFunctionCall(FmgrInfo *flinfo, Datum val); extern bytea *OidSendFunctionCall(Oid functionId, Datum val); /* * Routines in fmgr.c */ extern const Pg_finfo_record *fetch_finfo_record(void *filehandle, const char *funcname); extern void clear_external_function_hash(void *filehandle); extern Oid fmgr_internal_function(const char *proname); extern Oid get_fn_expr_rettype(FmgrInfo *flinfo); extern Oid get_fn_expr_argtype(FmgrInfo *flinfo, int argnum); extern Oid get_call_expr_argtype(fmNodePtr expr, int argnum); extern bool get_fn_expr_arg_stable(FmgrInfo *flinfo, int argnum); extern bool get_call_expr_arg_stable(fmNodePtr expr, int argnum); extern bool get_fn_expr_variadic(FmgrInfo *flinfo); extern bytea *get_fn_opclass_options(FmgrInfo *flinfo); extern bool has_fn_opclass_options(FmgrInfo *flinfo); extern void set_fn_opclass_options(FmgrInfo *flinfo, bytea *options); extern bool CheckFunctionValidatorAccess(Oid validatorOid, Oid functionOid); /* * Routines in dfmgr.c */ extern char *Dynamic_library_path; extern PGFunction load_external_function(const char *filename, const char *funcname, bool signalNotFound, void **filehandle); extern PGFunction lookup_external_function(void *filehandle, const char *funcname); extern void load_file(const char *filename, bool restricted); extern void **find_rendezvous_variable(const char *varName); extern Size EstimateLibraryStateSpace(void); extern void SerializeLibraryState(Size maxsize, char *start_address); 
extern void RestoreLibraryState(char *start_address); /* * Support for aggregate functions * * These are actually in executor/nodeAgg.c, but we declare them here since * the whole point is for callers to not be overly friendly with nodeAgg. */ /* AggCheckCallContext can return one of the following codes, or 0: */ #define AGG_CONTEXT_AGGREGATE 1 /* regular aggregate */ #define AGG_CONTEXT_WINDOW 2 /* window function */ extern int AggCheckCallContext(FunctionCallInfo fcinfo, MemoryContext *aggcontext); extern fmAggrefPtr AggGetAggref(FunctionCallInfo fcinfo); extern MemoryContext AggGetTempMemoryContext(FunctionCallInfo fcinfo); extern bool AggStateIsShared(FunctionCallInfo fcinfo); extern void AggRegisterCallback(FunctionCallInfo fcinfo, fmExprContextCallbackFunction func, Datum arg); /* * We allow plugin modules to hook function entry/exit. This is intended * as support for loadable security policy modules, which may want to * perform additional privilege checks on function entry or exit, or to do * other internal bookkeeping. To make this possible, such modules must be * able not only to support normal function entry and exit, but also to trap * the case where we bail out due to an error; and they must also be able to * prevent inlining. */ typedef enum FmgrHookEventType { FHET_START, FHET_END, FHET_ABORT } FmgrHookEventType; typedef bool (*needs_fmgr_hook_type) (Oid fn_oid); typedef void (*fmgr_hook_type) (FmgrHookEventType event, FmgrInfo *flinfo, Datum *arg); extern PGDLLIMPORT needs_fmgr_hook_type needs_fmgr_hook; extern PGDLLIMPORT fmgr_hook_type fmgr_hook; #define FmgrHookIsNeeded(fn_oid) \ (!needs_fmgr_hook ? false : (*needs_fmgr_hook)(fn_oid)) #endif /* FMGR_H */
/*------------------------------------------------------------------------- * * fmgr.h * Definitions for the Postgres function manager and function-call * interface. * * This file must be included by all Postgres modules that either define * or call fmgr-callable functions. * * * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/fmgr.h * *------------------------------------------------------------------------- */
(* Tiny_httpd_dir.mli *)
(** Serving static content from directories This module provides the same functionality as the "http_of_dir" tool. It exposes a directory (and its subdirectories), with the optional ability to delete or upload files. @since 0.11 *) (** behavior of static directory. This controls what happens when the user requests the path to a directory rather than a file. *) type dir_behavior = | Index (** Redirect to index.html if present, else fails. *) | Lists (** Lists content of directory. Be careful of security implications. *) | Index_or_lists (** Redirect to index.html if present and lists content otherwise. This is useful for tilde ("~") directories and other per-user behavior, but be mindful of security implications *) | Forbidden (** Forbid access to directory. This is suited for serving assets, for example. *) type hidden (** Type used to prevent users from building a config directly. Use {!default_config} or {!config} instead. *) (** configuration for static file handlers. This might get more fields over time. *) type config = { mutable download: bool; (** Is downloading files allowed? *) mutable dir_behavior: dir_behavior; (** Behavior when serving a directory and not a file *) mutable delete: bool; (** Is deleting a file allowed? (with method DELETE) *) mutable upload: bool; (** Is uploading a file allowed? (with method PUT) *) mutable max_upload_size: int; (** If {!upload} is true, this is the maximum size in bytes for uploaded files. *) _rest: hidden; (** Just ignore this field. *) } (** default configuration: [ { download=true ; dir_behavior=Forbidden ; delete=false ; upload=false ; max_upload_size = 10 * 1024 * 1024 }] *) val default_config : unit -> config val config : ?download:bool -> ?dir_behavior:dir_behavior -> ?delete:bool -> ?upload:bool -> ?max_upload_size:int -> unit -> config (** Build a config from {!default_config}. 
@since 0.12 *)

(** [add_dir_path ~config ~dir ~prefix server] adds a route handler to the
    [server] to serve static files in [dir] when the URL starts with
    [prefix], using the given configuration [config]. *)
val add_dir_path :
  config:config ->
  dir:string ->
  prefix:string ->
  Tiny_httpd_server.t ->
  unit

(** Virtual file system.

    This is used to emulate a file system from pure OCaml functions and
    data, e.g. for resources bundled inside the web server.
    @since 0.12 *)
module type VFS = sig
  val descr : string
  (** Description of the VFS *)

  val is_directory : string -> bool

  val contains : string -> bool
  (** [contains vfs path] returns [true] if [path] points to a file
      or directory inside [vfs]. *)

  val list_dir : string -> string array
  (** List directory. This only returns basenames, the files need to
      be put in the directory path using {!Filename.concat}. *)

  val delete : string -> unit
  (** Delete path *)

  val create : string -> (bytes -> int -> int -> unit) * (unit -> unit)
  (** Create a file and obtain a pair [write, close] *)

  val read_file_content : string -> Tiny_httpd_stream.t
  (** Read content of a file *)

  val file_size : string -> int option
  (** File size, e.g. using "stat" *)

  val file_mtime : string -> float option
  (** File modification time, e.g. using "stat" *)
end

val vfs_of_dir : string -> (module VFS)
(** [vfs_of_dir dir] makes a virtual file system that reads from the disk.
    @since 0.12 *)

val add_vfs :
  config:config ->
  vfs:(module VFS) ->
  prefix:string ->
  Tiny_httpd_server.t ->
  unit
(** Similar to {!add_dir_path} but using a virtual file system instead.
    @since 0.12 *)

(** An embedded file system, as a list of files with (relative) paths.
    This is useful in combination with the "tiny-httpd-mkfs" tool,
    which embeds the files it's given into an OCaml module.
@since 0.12 *)
module Embedded_fs : sig
  type t
  (** The pseudo-filesystem *)

  val create : ?mtime:float -> unit -> t
  (** Create an empty pseudo-filesystem. [mtime] is presumably the
      default modification time reported for entries — TODO confirm
      against the implementation. *)

  val add_file : ?mtime:float -> t -> path:string -> string -> unit
  (** Add a file to the virtual file system.
      @raise Invalid_argument if the path contains '..' or if it
      tries to make a directory out of an existing path that is a file. *)

  val to_vfs : t -> (module VFS)
  (** View the pseudo-filesystem as a {!VFS}. *)
end
cte_functypes.h
#ifndef CTE_FUNCTYPES

#define CTE_FUNCTYPES

#include <clb_simple_stuff.h>
#include <cio_basicparser.h>

/*---------------------------------------------------------------------*/
/*                    Data type declarations                           */
/*---------------------------------------------------------------------*/

/* Data type representing the various types of encodings for function
   symbols (including constants) and predicates. */

typedef enum
{
   FSNone,
   FSIdentVar,         /* Ident, starts with capital letter or _ */
   FSIdentFreeFun,     /* Ident, starts with Lower case letter or SQString */
   FSIdentInt,         /* Integer */
   FSIdentFloat,       /* Floating point number */
   FSIdentRational,    /* Rational number */
   FSIdentInterpreted, /* SemIdent */
   FSIdentObject       /* String "in double quotes" */
}FuncSymbType;

/*---------------------------------------------------------------------*/
/*                Exported Functions and Variables                     */
/*---------------------------------------------------------------------*/

/* Function symbols in terms are represented by positive numbers,
   variables by negative numbers. This alias allows clearer
   specifications. */

typedef long FunCode;

/* Token classes used when parsing function symbols; defined in the
   corresponding implementation file. */
extern TokenType FuncSymbToken;
extern TokenType FuncSymbStartToken;

/* Parse one function symbol from [in] into [id] and return its
   classification (presumably FSNone when no symbol is recognized --
   confirm in the implementation). */
FuncSymbType FuncSymbParse(Scanner_p in, DStr_p id);

#endif

/*---------------------------------------------------------------------*/
/*                        End of File                                  */
/*---------------------------------------------------------------------*/
/*----------------------------------------------------------------------- File : cte_functypes.h Author: Stephan Schulz Contents Simple, widely used functions for dealing with function symbols and operators. Copyright 1998, 1999 by the author. This code is released under the GNU General Public Licence and the GNU Lesser General Public License. See the file COPYING in the main E directory for details.. Run "eprover -h" for contact information. Changes <1> Sun Nov 9 23:09:33 MET 1997 New -----------------------------------------------------------------------*/
sum_worker.mli
open! Core
open Async

(** [main n] performs the worker's computation for input [n] and becomes
    determined with an [int] result — presumably a sum up to [n]; confirm
    against sum_worker.ml. *)
val main : int -> int Deferred.t
fs.mli
(* File-system backed Irmin store makers. The [_ext] variants take an
   explicit [Irmin_fs.Config] describing the on-disk layout — presumably
   the non-ext versions use a default layout; confirm in fs.ml. *)

module Append_only : Irmin.Append_only.Maker
module Atomic_write : Irmin.Atomic_write.Maker

include Irmin.Maker

module KV : Irmin.KV_maker

(** {1 Extended Stores} *)

(* Same as above, parameterized by a file-system configuration. *)
module Append_only_ext (C : Irmin_fs.Config) : Irmin.Append_only.Maker
module Atomic_write_ext (C : Irmin_fs.Config) : Irmin.Atomic_write.Maker

(* Separate layouts for objects and references. *)
module Maker_ext (Obj : Irmin_fs.Config) (Ref : Irmin_fs.Config) : Irmin.Maker
(* * Copyright (c) 2013-2021 Thomas Gazagnaire <thomas@gazagnaire.org> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. *)
dune
; Release profile: standard flags, but silence warning 3 (deprecated).
(env (release (flags (:standard -w -3))))

; stdune is vendored; dune treats it as third-party code (no lint/format).
(vendored_dirs stdune)
delegate_storage.mli
(** Places where tezzies can be found in the ledger's state. *)
type balance =
  | Contract of Contract_repr.t
  | Rewards of Signature.Public_key_hash.t * Cycle_repr.t
  | Fees of Signature.Public_key_hash.t * Cycle_repr.t
  | Deposits of Signature.Public_key_hash.t * Cycle_repr.t

(** A credit or debit of tezzies to a balance. *)
type balance_update =
  | Debited of Tez_repr.t
  | Credited of Tez_repr.t

(** A list of balance updates. Duplicates may happen. *)
type balance_updates = (balance * balance_update) list

val balance_updates_encoding : balance_updates Data_encoding.t

(** Remove zero-valued balances from a list of updates. *)
val cleanup_balance_updates : balance_updates -> balance_updates

type frozen_balance = {
  deposit : Tez_repr.t ;
  fees : Tez_repr.t ;
  rewards : Tez_repr.t ;
}

(** Is the contract eligible to delegation? *)
val is_delegatable:
  Raw_context.t -> Contract_repr.t -> bool tzresult Lwt.t

(** Allow to register a delegate when creating an account. *)
val init:
  Raw_context.t -> Contract_repr.t -> Signature.Public_key_hash.t ->
  Raw_context.t tzresult Lwt.t

(** Cleanup delegation when deleting a contract. *)
val remove: Raw_context.t -> Contract_repr.t -> Raw_context.t tzresult Lwt.t

(** Reading the current delegate of a contract. *)
val get:
  Raw_context.t -> Contract_repr.t ->
  Signature.Public_key_hash.t option tzresult Lwt.t

val registered: Raw_context.t -> Signature.Public_key_hash.t -> bool Lwt.t

(** Updating the delegate of a contract.

    When calling this function on an "implicit contract" this function
    fails, unless the registered delegate is the contract manager.
    In that case, the manager is now registered as a delegate. One
    cannot unregister a delegate for now. The associated contract is
    now 'undeletable'. *)
val set:
  Raw_context.t -> Contract_repr.t -> Signature.Public_key_hash.t option ->
  Raw_context.t tzresult Lwt.t

(** Same as {!set} ignoring the [delegatable] flag. *)
val set_from_script:
  Raw_context.t -> Contract_repr.t -> Signature.Public_key_hash.t option ->
  Raw_context.t tzresult Lwt.t

type error +=
  | Non_delegatable_contract of Contract_repr.contract (* `Permanent *)
  | No_deletion of Signature.Public_key_hash.t (* `Permanent *)
  | Active_delegate (* `Temporary *)
  | Current_delegate (* `Temporary *)
  | Empty_delegate_account of Signature.Public_key_hash.t (* `Temporary *)
  | Balance_too_low_for_deposit of
      { delegate : Signature.Public_key_hash.t ;
        deposit : Tez_repr.t ;
        balance : Tez_repr.t } (* `Temporary *)

(** Iterate on all registered delegates. *)
val fold:
  Raw_context.t ->
  init:'a -> f:(Signature.Public_key_hash.t -> 'a -> 'a Lwt.t) -> 'a Lwt.t

(** List all registered delegates. *)
val list: Raw_context.t -> Signature.Public_key_hash.t list Lwt.t

(** Various functions to 'freeze' tokens. A frozen 'deposit' keeps its
    associated rolls. When frozen, 'fees' may trigger new rolls
    allocation. Rewards won't trigger new rolls allocation until
    unfrozen. *)
val freeze_deposit:
  Raw_context.t -> Signature.Public_key_hash.t -> Tez_repr.t ->
  Raw_context.t tzresult Lwt.t

val freeze_fees:
  Raw_context.t -> Signature.Public_key_hash.t -> Tez_repr.t ->
  Raw_context.t tzresult Lwt.t

val freeze_rewards:
  Raw_context.t -> Signature.Public_key_hash.t -> Tez_repr.t ->
  Raw_context.t tzresult Lwt.t

(** Trigger the context maintenance at the end of cycle 'n', i.e.:
    unfreeze deposit/fees/rewards from 'n - preserved_cycle'; punish the
    provided unrevealed seeds (typically the seed from cycle 'n - 1').
    Returns a list of accounts with the amount that was unfrozen for
    each and the list of deactivated delegates. *)
val cycle_end:
  Raw_context.t -> Cycle_repr.t -> Nonce_storage.unrevealed list ->
  (Raw_context.t * balance_updates * Signature.Public_key_hash.t list) tzresult Lwt.t

(** Burn all the frozen deposit/fees/rewards for a delegate at a given
    cycle. Returns the burned amounts. *)
val punish:
  Raw_context.t -> Signature.Public_key_hash.t -> Cycle_repr.t ->
  (Raw_context.t * frozen_balance) tzresult Lwt.t

(** Has the given key some frozen tokens in its implicit contract? *)
val has_frozen_balance:
  Raw_context.t -> Signature.Public_key_hash.t -> Cycle_repr.t ->
  bool tzresult Lwt.t

(** Returns the amount of frozen deposit, fees and rewards associated
    to a given delegate. *)
val frozen_balance:
  Raw_context.t -> Signature.Public_key_hash.t ->
  Tez_repr.t tzresult Lwt.t

val frozen_balance_encoding: frozen_balance Data_encoding.t
val frozen_balance_by_cycle_encoding: frozen_balance Cycle_repr.Map.t Data_encoding.t

(** Returns the amount of frozen deposit, fees and rewards associated
    to a given delegate, indexed by the cycle at the end of which the
    balance will be unfrozen. *)
val frozen_balance_by_cycle:
  Raw_context.t -> Signature.Public_key_hash.t ->
  frozen_balance Cycle_repr.Map.t Lwt.t

(** Returns the full 'balance' of the implicit contract associated to
    a given key, i.e. the sum of the spendable balance and of the
    frozen balance. *)
val full_balance:
  Raw_context.t -> Signature.Public_key_hash.t ->
  Tez_repr.t tzresult Lwt.t

val staking_balance:
  Raw_context.t -> Signature.Public_key_hash.t ->
  Tez_repr.t tzresult Lwt.t

(** Returns the list of contracts that delegated towards a given delegate *)
val delegated_contracts:
  Raw_context.t -> Signature.Public_key_hash.t ->
  Contract_hash.t list Lwt.t

val delegated_balance:
  Raw_context.t -> Signature.Public_key_hash.t ->
  Tez_repr.t tzresult Lwt.t

val deactivated:
  Raw_context.t -> Signature.Public_key_hash.t ->
  bool tzresult Lwt.t

val grace_period:
  Raw_context.t -> Signature.Public_key_hash.t ->
  Cycle_repr.t tzresult Lwt.t
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. <contact@tezos.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
test_cemented_store.ml
module Assert_lib = Lib_test_extra.Assert_lib
open Test_utils

(* Check that every block of [blocks] can be read back from
   [cemented_store] by hash. With [with_metadata=true] the whole block
   (metadata included) must be equal; otherwise only the headers are
   compared. *)
let assert_presence_in_cemented_store ?(with_metadata = true) cemented_store
    blocks =
  let open Lwt_result_syntax in
  List.iter_es
    (fun b ->
      let hash = Block_repr.hash b in
      let* o =
        Cemented_block_store.get_cemented_block_by_hash
          ~read_metadata:with_metadata
          cemented_store
          hash
      in
      match o with
      | None ->
          Alcotest.failf
            "assert_presence_in_cemented_store: cannot find block %a"
            pp_raw_block
            b
      | Some b' ->
          if with_metadata then (
            Assert.equal ~msg:"block equality with metadata" b b' ;
            return_unit)
          else (
            (* Header-only comparison: the stored block may lack the
               in-memory metadata of [b]. *)
            Assert_lib.Crypto.equal_block
              ~msg:"block equality without metadata"
              (Block_repr.header b)
              (Block_repr.header b') ;
            return_unit))
    blocks

(* Cement pruned blocks (no metadata written).
   NOTE(review): blocks are cemented with ~write_metadata:false yet
   presence is asserted with ~with_metadata:true — presumably fine
   because `Pruned blocks carry no metadata on either side, so full
   equality still holds; confirm this asymmetry is intended. *)
let test_cement_pruned_blocks cemented_store =
  let open Lwt_result_syntax in
  let*! (blocks, _head) =
    make_raw_block_list ~kind:`Pruned (genesis_hash, -1l) 4095
  in
  let* () =
    Cemented_block_store.cement_blocks
      cemented_store
      ~write_metadata:false
      blocks
  in
  assert_presence_in_cemented_store ~with_metadata:true cemented_store blocks

(* Cement full blocks without writing their metadata: only headers can
   be expected back, hence ~with_metadata:false. *)
let test_cement_full_blocks cemented_store =
  let open Lwt_result_syntax in
  let*! (blocks, _head) =
    make_raw_block_list ~kind:`Full (genesis_hash, -1l) 4095
  in
  let* () =
    Cemented_block_store.cement_blocks
      cemented_store
      ~write_metadata:false
      blocks
  in
  assert_presence_in_cemented_store ~with_metadata:false cemented_store blocks

(* Cement full blocks WITH metadata and check it can be retrieved. *)
let test_metadata_retrieval cemented_store =
  let open Lwt_result_syntax in
  let*! (blocks, _head) =
    make_raw_block_list ~kind:`Full (genesis_hash, -1l) 100
  in
  let* () =
    Cemented_block_store.cement_blocks
      cemented_store
      ~write_metadata:true
      blocks
  in
  assert_presence_in_cemented_store ~with_metadata:true cemented_store blocks

(* Wrap a test body [f] so it runs against a fresh cemented store in a
   temporary directory, and surface any error trace as an Alcotest
   failure. *)
let wrap_cemented_store_test (name, f) =
  let open Lwt_result_syntax in
  let cemented_store_init f _ () =
    let prefix_dir = "tezos_indexed_store_test_" in
    Lwt_utils_unix.with_tempdir prefix_dir (fun base_dir ->
        let run f = f base_dir in
        let*! r =
          run (fun base_dir ->
              let store_dir = Naming.store_dir ~dir_path:base_dir in
              let chain_dir = Naming.chain_dir store_dir Chain_id.zero in
              let*! () = Lwt_unix.mkdir (Naming.dir_path chain_dir) 0o700 in
              let* cemented_store =
                Cemented_block_store.init ~readonly:false chain_dir
              in
              (* [protect] ensures the store is closed even if [f] fails. *)
              Error_monad.protect (fun () ->
                  let* () = f cemented_store in
                  Cemented_block_store.close cemented_store ;
                  return_unit))
        in
        match r with
        | Error err ->
            Format.printf
              "@\nTest failed:@\n%a@."
              Error_monad.pp_print_trace
              err ;
            Lwt.fail Alcotest.Test_error
        | Ok () -> Lwt.return_unit)
  in
  Alcotest_lwt.test_case name `Quick (cemented_store_init f)

(* Alcotest suite: all cemented-store test cases. *)
let tests =
  let test_cases =
    List.map
      wrap_cemented_store_test
      [
        ("cementing pruned blocks", test_cement_pruned_blocks);
        ("cementing full blocks", test_cement_full_blocks);
        ("retrieve cemented metadata", test_metadata_retrieval);
      ]
  in
  ("cemented store", test_cases)
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2020 Nomadic Labs, <contact@nomadic-labs.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
local.ml
open Stdune open Dune_cache_storage.Layout open Fiber.O open Import module Store_artifacts_result = struct type t = | Stored of (Path.Build.t * Digest.t) list | Already_present of (Path.Build.t * Digest.t) list | Error of exn | Will_not_store_due_to_non_determinism of Sexp.t let of_store_result ~artifacts t = match (t : Store_result.t) with | Stored -> Stored artifacts | Already_present -> Already_present artifacts | Error exn -> Error exn | Will_not_store_due_to_non_determinism details -> Will_not_store_due_to_non_determinism details let bind t ~f = match t with | Stored data -> f data | Already_present data -> f data | (Error _ | Will_not_store_due_to_non_determinism _) as res -> res end module Target = struct type t = { path : Path.Build.t ; executable : bool } let create path = match Path.Build.lstat path with | { Unix.st_kind = Unix.S_REG; st_perm; _ } -> Path.Build.chmod path ~mode:(Path.Permissions.remove Path.Permissions.write st_perm); let executable = Path.Permissions.test Path.Permissions.execute st_perm in Some { path; executable } | (exception Unix.Unix_error _) | _ -> None end (* This function is like [Unix.link] but handles the "Too many links" error by creating a copy of the [src] in a temporary directory, then atomically replacing the [src] with the copy, and finally creating the requested [dst] by calling [Unix.link src dst] again. We hit the "Too many links" error because we store a lot of empty files in the cache, which all get deduplicated into the same cache entry. This function essentially deletes the "overlinked" entry from the cache, creating a fresh copy with the 0 link count. This leads to some duplication but it's negligible: we might store the empty file several times across all workspaces instead of just storing it once. If you need to debug this function, you can trigger the "Too many links" error by running [for i in {1..100000}; do ln $file tmp/$i; done], where the [$file] is the shared cache entry for the empty file. 
After that, no more hard links on [$file] will be allowed, triggering the [EMLINK] code path. *) let link_even_if_there_are_too_many_links_already ~src ~dst = try Path.link src dst with | Unix.Unix_error (Unix.EMLINK, _, _) | Unix.Unix_error (Unix.EUNKNOWNERR -1142, _, _) (* Needed for OCaml < 5.1 *) -> Temp.with_temp_file ~dir:temp_dir ~prefix:"dune" ~suffix:"copy" ~f:(function | Error e -> raise e | Ok temp_file -> Io.copy_file ~src ~dst:temp_file (); (* This replaces [src], which has too many links already, with a fresh copy we've just created in the [temp_file]. *) Path.rename temp_file src; (* This should now succeed. *) Path.link src dst) module Artifacts = struct include Dune_cache_storage.Artifacts let store_metadata ~mode ~metadata ~rule_digest (artifacts : (Path.Build.t * Digest.t) list) = let entries = List.map artifacts ~f:(fun (target, file_digest) -> let entry : Metadata_entry.t = { file_name = Path.Build.basename target; file_digest } in entry) in Metadata_file.store ~mode { metadata; entries } ~rule_digest (* Step I of [store_skipping_metadata]. If any of the targets couldn't be stored in the temporary directory, then the result is [Error] with the corresponding exception. Otherwise, the result is [Ok ()]. *) let store_targets_to ~temp_dir ~targets ~mode : unit Or_exn.t = Result.List.iter targets ~f:(fun { Target.path; _ } -> let path_in_build_dir = Path.build path in let path_in_temp_dir = Path.relative temp_dir (Path.basename path_in_build_dir) in Result.try_with (fun () -> Dune_cache_storage.Util.link_or_copy ~mode ~src:path_in_build_dir ~dst:path_in_temp_dir)) (* Step II of [store_skipping_metadata]. Computing digests can be slow, so we do that in parallel. 
*) let compute_digests_in ~temp_dir ~targets ~compute_digest : (Path.Build.t * Digest.t) list Or_exn.t Fiber.t = let open Fiber.O in Fiber.parallel_map targets ~f:(fun { Target.path; executable } -> let file = Path.relative temp_dir (Path.Build.basename path) in compute_digest ~executable file >>| Or_exn.map ~f:(fun digest -> (path, digest))) >>| Result.List.all (* Step III of [store_skipping_metadata]. *) let store_to_cache_from ~temp_dir ~mode (artifacts : (Path.Build.t * Digest.t) list) = List.fold_left artifacts ~init:Store_result.empty ~f:(fun results (target, digest) -> let file_name = Path.Build.basename target in let path_in_temp_dir = Path.relative temp_dir file_name in let path_in_cache = file_path ~file_digest:digest in let store_using_hardlinks () = match Dune_cache_storage.Util.Optimistically.link ~src:path_in_temp_dir ~dst:path_in_cache with | exception Unix.Unix_error (Unix.EEXIST, _, _) -> ( (* We end up here if the cache already contains an entry for this artifact. We deduplicate by keeping only one copy, in the cache. *) let path_in_build_dir = Path.build target in match Path.unlink_no_err path_in_temp_dir; (* At first, we deduplicate the temporary file. Doing this intermediate step allows us to keep the original target in case the below link step fails. This might happen if the trimmer has just deleted [path_in_cache]. In this rare case, this function fails with an [Error], and so we might end up with some duplicates in the workspace. *) link_even_if_there_are_too_many_links_already ~src:path_in_cache ~dst:path_in_temp_dir; (* Now we can simply rename the temporary file into the target, knowing that the original target remains in place if the renaming fails. One curious case to think about is if the file in the cache happens to have the same inode as the file in the workspace. In that case this deduplication should be a no-op, but the [rename] operation has a quirk where [path_in_temp_dir] can remain on disk. 
This is not a problem because we clean the temporary directory later. *) Path.rename path_in_temp_dir path_in_build_dir with | exception e -> Store_result.Error e | () -> Already_present) | exception e -> Error e | () -> Stored in let store_using_test_and_rename () = (* CR-someday amokhov: There is a race here. If [path_in_cache] is created after [Path.exists] but before [Path.rename], it will be silently overwritten. Find a good way to avoid this race. *) match Path.exists path_in_cache with | true -> Store_result.Already_present | false -> ( match Dune_cache_storage.Util.Optimistically.rename ~src:path_in_temp_dir ~dst:path_in_cache with | exception e -> Error e | () -> Stored) in let result = match (mode : Dune_cache_storage.Mode.t) with | Hardlink -> store_using_hardlinks () | Copy -> store_using_test_and_rename () in Store_result.combine results result) let store_skipping_metadata ~mode ~targets ~compute_digest : Store_artifacts_result.t Fiber.t = Dune_cache_storage.with_temp_dir ~suffix:"artifacts" (function | Error exn -> Fiber.return (Store_artifacts_result.Error exn) | Ok temp_dir -> ( match store_targets_to ~temp_dir ~targets ~mode with | Error exn -> Fiber.return (Store_artifacts_result.Error exn) | Ok () -> ( compute_digests_in ~temp_dir ~targets ~compute_digest >>| function | Error exn -> Store_artifacts_result.Error exn | Ok artifacts -> let result = store_to_cache_from ~temp_dir ~mode artifacts in Store_artifacts_result.of_store_result ~artifacts result))) let store ~mode ~rule_digest ~compute_digest targets : Store_artifacts_result.t Fiber.t = let+ result = store_skipping_metadata ~mode ~targets ~compute_digest in Store_artifacts_result.bind result ~f:(fun artifacts -> let result = store_metadata ~mode ~rule_digest ~metadata:[] artifacts in Store_artifacts_result.of_store_result ~artifacts result) let create_all_or_nothing ~create ~destroy list = Result.List.fold_left list ~init:[] ~f:(fun acc x -> match create x with | Error e -> List.iter acc 
~f:destroy; Error e | Ok v -> Ok (v :: acc)) |> Result.map ~f:List.rev type file_restore_error = | Not_found | Other of exn let restore ~mode ~rule_digest ~target_dir = Restore_result.bind (list ~rule_digest) ~f:(fun (entries : Metadata_entry.t list) -> match create_all_or_nothing entries ~destroy:(fun (path_in_build_dir, _digest) -> Path.Build.unlink_no_err path_in_build_dir) ~create:(fun { Metadata_entry.file_name; file_digest } -> let path_in_build_dir = Path.Build.relative target_dir file_name in let path_in_cache = file_path ~file_digest in match (mode : Dune_cache_storage.Mode.t) with | Hardlink -> ( match link_even_if_there_are_too_many_links_already ~src:path_in_cache ~dst:(Path.build path_in_build_dir) with | exception Unix.Unix_error (Unix.ENOENT, _, _) -> Error (Not_found : file_restore_error) | exception exn -> Error (Other exn) | () -> Ok (path_in_build_dir, file_digest)) | Copy -> ( match Io.copy_file ~src:path_in_cache ~dst:(Path.build path_in_build_dir) () with | exception Sys_error _ -> Error Not_found | () -> Ok (path_in_build_dir, file_digest))) with | Ok artifacts -> Restored artifacts | Error Not_found -> (* We reach this point when one of the entries mentioned in the metadata is missing. The trimmer will eventually delete such "broken" metadata, so it is reasonable to consider that this [rule_digest] is not found in the cache. *) Not_found_in_cache | Error (Other e) -> Error e) end let store_artifacts = Artifacts.store let restore_artifacts = Artifacts.restore
dune
; Public library: only the build_path_prefix_map module.
(library
 (name build_path_prefix_map)
 (public_name build_path_prefix_map)
 (modules build_path_prefix_map))

; Unit tests for the library.
(test
 (name test)
 (libraries build_path_prefix_map)
 (modules test))
quasi_macros_tactic.h
#pragma once #include "util/params.h" class ast_manager; class tactic; tactic * mk_quasi_macros_tactic(ast_manager & m, params_ref const & p = params_ref()); /* ADD_TACTIC("quasi-macros", "Identifies and applies quasi-macros.", "mk_quasi_macros_tactic(m, p)") */
/*++ Copyright (c) 2012 Microsoft Corporation Module Name: quasi_macros_tactic.h Abstract: Quasi-Macros Author: Christoph (cwinter) 2012-10-26 Notes: --*/
operators.ml
(* The operators named here are the ones that it is difficult or
   impossible to define as "user" infix operators:

   - -. are both infix and prefix
   && || have special evaluation
   :: is also used in patterns
   ~ triggers a lexer state switch *)

open Lens_utility

module Operator_not_found_exception = struct
  type t = string

  (* Human-readable message for an operator the lens language rejects. *)
  let to_string op =
    Format.asprintf "Relational lenses do not support operator %s." op

  (* The offending operator, verbatim. *)
  let get_op op = op
end

module Unary = struct
  (* Prefix operators understood by relational lens predicates. *)
  type t = Minus | Not [@@deriving show, sexp]

  let to_string = function
    | Not -> "!"
    | Minus -> "-"

  let of_string = function
    | "!" -> Result.return Not
    | "-" -> Result.return Minus
    | unknown -> Result.error unknown

  let of_string_exn str = Result.ok_exn (of_string str)

  let fmt f op = Format.pp_print_string f (to_string op)
end

module Binary = struct
  (* Infix operators understood by relational lens predicates. *)
  type t =
    | Plus
    | Minus
    | Multiply
    | Divide
    | Greater
    | GreaterEqual
    | Less
    | LessEqual
    | Equal
    | LogicalAnd
    | LogicalOr
  [@@deriving show, sexp]

  (* Note: logical operators print with their SQL spelling. *)
  let to_string = function
    | Plus -> "+"
    | Minus -> "-"
    | Multiply -> "*"
    | Divide -> "/"
    | Greater -> ">"
    | GreaterEqual -> ">="
    | Less -> "<"
    | LessEqual -> "<="
    | Equal -> "="
    | LogicalAnd -> "AND"
    | LogicalOr -> "OR"

  (* Note: logical operators parse with their source spelling (&&, ||),
     not the SQL one produced by [to_string]. *)
  let of_string = function
    | "+" -> Result.return Plus
    | "-" -> Result.return Minus
    | "*" -> Result.return Multiply
    | "/" -> Result.return Divide
    | ">" -> Result.return Greater
    | ">=" -> Result.return GreaterEqual
    | "<" -> Result.return Less
    | "<=" -> Result.return LessEqual
    | "=" -> Result.return Equal
    | "&&" -> Result.return LogicalAnd
    | "||" -> Result.return LogicalOr
    | unknown -> Result.error unknown

  let of_string_exn str = Result.ok_exn (of_string str)

  let fmt f op = Format.pp_print_string f (to_string op)
end
(* The operators named here are the ones that it is difficult or impossible to define as "user" infix operators: - -. are both infix and prefix && || have special evaluation :: is also used in patterns ~ triggers a lexer state switch *)
storage_costs.ml
(* Gas model for read accesses:

   cost(path_length, read_bytes) = 200_000 + 5000 * path_length + 2 * read_bytes *)
let read_access ~path_length ~read_bytes =
  let open Saturation_repr in
  (* Fixed overhead plus a per-directory-level traversal cost. *)
  let traversal_cost = safe_int (200_000 + (5000 * path_length)) in
  (* Linear cost in the number of bytes read. *)
  let byte_cost = mul (safe_int 2) (safe_int read_bytes) in
  Gas_limit_repr.atomic_step_cost (add traversal_cost byte_cost)

(* Gas model for write accesses:

   cost(written_bytes) = 200_000 + 4 * written_bytes *)
let write_access ~written_bytes =
  let open Saturation_repr in
  (* Linear cost in the number of bytes written, on top of a fixed fee. *)
  let byte_cost = mul (safe_int 4) (safe_int written_bytes) in
  Gas_limit_repr.atomic_step_cost (add (safe_int 200_000) byte_cost)
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2020 Nomadic Labs <contact@nomadic-labs.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
bool.ml
(* Total order on booleans with [false < true]. *)
let compare a b =
  match (a, b) with
  | false, true -> Ordering.Lt
  | true, false -> Ordering.Gt
  | true, true | false, false -> Ordering.Eq

(* Derive the comparison operators from [compare]. *)
include Comparator.Operators (struct
  type nonrec t = bool

  let compare = compare
end)

let to_string b = string_of_bool b

let of_string = bool_of_string_opt

let to_dyn b = Dyn.Bool b

let hash (b : bool) = Poly.hash b
dune
; Two empty libraries that exist only to trigger the ppx drivers.
(library (name usesppx1) (modules ()) (preprocess (pps ppx1)))

(library (name usesppx2) (modules ()) (preprocess (pps ppx2)))

; Default alias prints the generated merlin configuration.
(alias (name default) (action (echo %{read:.merlin-conf})))
_contextvarsmodule.c
/* _contextvars: thin C accelerator module exposing the contextvars types
 * (Context, ContextVar, Token) and copy_context(), all of which are
 * implemented in the core runtime. Argument Clinic generates the method
 * wrappers into the included clinic header; the clinic comment markers
 * below are machine-checked and must not be edited by hand. */

#include "Python.h"

#include "clinic/_contextvarsmodule.c.h"

/*[clinic input]
module _contextvars
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=a0955718c8b8cea6]*/

/*[clinic input]
_contextvars.copy_context
[clinic start generated code]*/

static PyObject *
_contextvars_copy_context_impl(PyObject *module)
/*[clinic end generated code: output=1fcd5da7225c4fa9 input=89bb9ae485888440]*/
{
    /* Return a copy of the calling thread's current Context. */
    return PyContext_CopyCurrent();
}

PyDoc_STRVAR(module_doc, "Context Variables");

static PyMethodDef _contextvars_methods[] = {
    /* METHODDEF macro is emitted by Argument Clinic. */
    _CONTEXTVARS_COPY_CONTEXT_METHODDEF
    {NULL, NULL}
};

/* Py_mod_exec slot: publish the three contextvars types on the module.
 * Returns 0 on success, -1 with an exception set on failure. */
static int
_contextvars_exec(PyObject *m)
{
    if (PyModule_AddType(m, &PyContext_Type) < 0) {
        return -1;
    }
    if (PyModule_AddType(m, &PyContextVar_Type) < 0) {
        return -1;
    }
    if (PyModule_AddType(m, &PyContextToken_Type) < 0) {
        return -1;
    }
    return 0;
}

static struct PyModuleDef_Slot _contextvars_slots[] = {
    {Py_mod_exec, _contextvars_exec},
    {0, NULL}
};

/* Multi-phase (PEP 489) module definition: m_size of 0 means the module
 * keeps no per-interpreter state of its own. */
static struct PyModuleDef _contextvarsmodule = {
    PyModuleDef_HEAD_INIT,      /* m_base */
    "_contextvars",             /* m_name */
    module_doc,                 /* m_doc */
    0,                          /* m_size */
    _contextvars_methods,       /* m_methods */
    _contextvars_slots,         /* m_slots */
    NULL,                       /* m_traverse */
    NULL,                       /* m_clear */
    NULL,                       /* m_free */
};

PyMODINIT_FUNC
PyInit__contextvars(void)
{
    return PyModuleDef_Init(&_contextvarsmodule);
}
chunked_suffix.ml
open Import
include Chunked_suffix_intf

(* A suffix file made of an ordered sequence of chunk files. Only the last
   chunk is appendable; earlier chunks are read-only. Suffix offsets ([off])
   are global across chunks, while [poff] is an offset within one chunk. *)
module Make (Io : Io.S) (Errs : Io_errors.S with module Io = Io) = struct
  module Io = Io
  module Errs = Errs
  module Ao = Append_only_file.Make (Io) (Errs)

  (* [suffix_off] is the global suffix offset at which this chunk starts. *)
  type chunk = { idx : int; suffix_off : int63; ao : Ao.t }
  type create_error = Io.create_error

  type open_error =
    [ Io.open_error
    | `Closed
    | `Invalid_argument
    | `Inconsistent_store
    | `Read_out_of_bounds ]

  type add_new_error =
    [ open_error
    | Io.close_error
    | `Pending_flush
    | `File_exists of string
    | `Multiple_empty_chunks ]

  (** A simple container for chunks. *)
  module Inventory : sig
    type t

    val v : int -> (int -> chunk) -> t
    val appendable : t -> chunk

    val find : off:int63 -> t -> chunk * int63
    (** [find ~off t] returns the chunk that contains suffix offset [off],
        along with the corresponding [poff] within the chunk.

        Raises `Read_out_of_bounds exception. *)

    val fold :
      (acc:'a -> is_appendable:bool -> chunk:chunk -> 'a) -> 'a -> t -> 'a

    val open_ :
      start_idx:int ->
      chunk_num:int ->
      open_chunk:
        (chunk_idx:int ->
        is_legacy:bool ->
        is_appendable:bool ->
        (Ao.t, open_error) result) ->
      (t, [> open_error ]) result

    val close : t -> (unit, [> Io.close_error | `Pending_flush ]) result

    val add_new_appendable :
      open_chunk:
        (chunk_idx:int ->
        is_legacy:bool ->
        is_appendable:bool ->
        (Ao.t, add_new_error) result) ->
      t ->
      (unit, [> add_new_error ]) result

    val length : t -> int63
    (** [length t] is the length of bytes for all chunks *)

    val start_idx : t -> int
    (** [start_idx t] is the idx of the first chunk *)

    val count : t -> int
    (** [count t] is the number of chunks *)
  end = struct
    type t = { mutable chunks : chunk Array.t }

    (* Internal: used to escape [Array.init] in [open_] below; converted back
       to a [result] before crossing the module boundary. *)
    exception OpenInventoryError of open_error

    let v num create = { chunks = Array.init num create }

    (* The appendable chunk is always the last element of [chunks]. *)
    let appendable t = Array.get t.chunks (Array.length t.chunks - 1)

    let find ~off t =
      let open Int63.Syntax in
      let suffix_off_to_chunk_poff c = off - c.suffix_off in
      (* A chunk contains [off] iff the derived [poff] falls inside its
         current end offset. *)
      let find c =
        let end_poff = Ao.end_poff c.ao in
        let poff = suffix_off_to_chunk_poff c in
        Int63.zero <= poff && poff < end_poff
      in
      match Array.find_opt find t.chunks with
      | None -> raise (Errors.Pack_error `Read_out_of_bounds)
      | Some c -> (c, suffix_off_to_chunk_poff c)

    let end_offset_of_chunk start_offset ao =
      let chunk_len = Ao.end_poff ao in
      Int63.Syntax.(start_offset + chunk_len)

    (* Chunk 0 is the pre-chunking legacy suffix file. *)
    let is_legacy chunk_idx = chunk_idx = 0

    let fold f acc t =
      let appendable_idx = (appendable t).idx in
      Array.fold_left
        (fun acc chunk ->
          let is_appendable = chunk.idx = appendable_idx in
          f ~acc ~is_appendable ~chunk)
        acc t.chunks

    (* Open [chunk_num] chunks starting at [start_idx], threading the running
       suffix offset through [off_acc] so each chunk learns where it starts. *)
    let open_ ~start_idx ~chunk_num ~open_chunk =
      let off_acc = ref Int63.zero in
      let create_chunk i =
        let suffix_off = !off_acc in
        let is_appendable = i = chunk_num - 1 in
        let chunk_idx = start_idx + i in
        let is_legacy = is_legacy chunk_idx in
        let open_result = open_chunk ~chunk_idx ~is_legacy ~is_appendable in
        match open_result with
        | Error err -> raise (OpenInventoryError err)
        | Ok ao ->
            off_acc := end_offset_of_chunk suffix_off ao;
            { idx = chunk_idx; suffix_off; ao }
      in
      try Ok (v chunk_num create_chunk)
      with OpenInventoryError err ->
        Error (err : open_error :> [> open_error ])

    let close t =
      (* Close immutable chunks, ignoring errors. *)
      let _ =
        Array.sub t.chunks 0 (Array.length t.chunks - 1)
        |> Array.iter @@ fun chunk ->
           let _ = Ao.close chunk.ao in
           ()
      in
      (* Close appendable chunk and keep error since this is the one that can
         have a pending flush. *)
      (appendable t).ao |> Ao.close

    (* Widen an [add_new_error] into the caller's open polymorphic variant. *)
    let wrap_error result =
      Result.map_error
        (fun err -> (err : add_new_error :> [> add_new_error ]))
        result

    let reopen_last_chunk ~open_chunk t =
      (* Close the previous appendable chunk and reopen as non-appendable. *)
      let open Result_syntax in
      let ({ idx; ao; suffix_off } as last_chunk) = appendable t in
      let is_legacy = is_legacy idx in
      (* Compute the suffix_off for the following chunk. *)
      let length = end_offset_of_chunk suffix_off ao in
      let* () = Ao.close ao in
      let* ao =
        open_chunk ~chunk_idx:idx ~is_legacy ~is_appendable:false |> wrap_error
      in
      let pos = Array.length t.chunks - 1 in
      t.chunks.(pos) <- { last_chunk with ao };
      Ok length

    let create_appendable_chunk ~open_chunk t suffix_off =
      let open Result_syntax in
      let next_id = succ (appendable t).idx in
      let* ao =
        open_chunk ~chunk_idx:next_id ~is_legacy:false ~is_appendable:true
      in
      Ok { idx = next_id; suffix_off; ao }

    (* Seal the current appendable chunk (reopened read-only) and append a
       fresh appendable chunk starting at the sealed chunk's end offset. *)
    let add_new_appendable ~open_chunk t =
      let open Result_syntax in
      let* next_suffix_off = reopen_last_chunk ~open_chunk t in
      let* chunk =
        create_appendable_chunk ~open_chunk t next_suffix_off |> wrap_error
      in
      t.chunks <- Array.append t.chunks [| chunk |];
      Ok ()

    let length t =
      let open Int63.Syntax in
      Array.fold_left (fun sum c -> sum + Ao.end_poff c.ao) Int63.zero t.chunks

    let count t = Array.length t.chunks
    let start_idx t = t.chunks.(0).idx
  end

  type t = { inventory : Inventory.t; root : string; dead_header_size : int }

  let chunk_path = Layout.V4.suffix_chunk

  (* Create a brand-new read-write suffix with a single empty appendable
     chunk at [start_idx]. *)
  let create_rw ~root ~start_idx ~overwrite ~auto_flush_threshold
      ~auto_flush_procedure =
    let open Result_syntax in
    let chunk_idx = start_idx in
    let path = chunk_path ~root ~chunk_idx in
    let+ ao =
      Ao.create_rw ~path ~overwrite ~auto_flush_threshold ~auto_flush_procedure
    in
    let chunk = { idx = chunk_idx; suffix_off = Int63.zero; ao } in
    let inventory = Inventory.v 1 (Fun.const chunk) in
    { inventory; root; dead_header_size = 0 }

  (** A module to adjust values when mapping from chunks to append-only files *)
  module Ao_shim = struct
    type t = { dead_header_size : int; end_poff : int63 }

    let v ~path ~appendable_chunk_poff ~dead_header_size ~is_legacy
        ~is_appendable =
      let open Result_syntax in
      (* Only use the legacy dead_header_size for legacy chunks. *)
      let dead_header_size = if is_legacy then dead_header_size else 0 in
      (* The appendable chunk uses the provided [appendable_chunk_poff]; but
         the others read their size on disk. TODO: this is needed for the Ao
         module's current APIs but could perhaps be removed by future Ao API
         modifications. *)
      let+ end_poff =
        if is_appendable then Ok appendable_chunk_poff else Io.size_of_path path
      in
      { dead_header_size; end_poff }
  end

  let open_rw ~root ~appendable_chunk_poff ~start_idx ~chunk_num
      ~dead_header_size ~auto_flush_threshold ~auto_flush_procedure =
    let open Result_syntax in
    let open_chunk ~chunk_idx ~is_legacy ~is_appendable =
      let path = chunk_path ~root ~chunk_idx in
      let* { dead_header_size; end_poff } =
        Ao_shim.v ~path ~appendable_chunk_poff ~dead_header_size ~is_legacy
          ~is_appendable
      in
      (* Only the last chunk is opened read-write. *)
      match is_appendable with
      | true ->
          Ao.open_rw ~path ~end_poff ~dead_header_size ~auto_flush_threshold
            ~auto_flush_procedure
      | false -> Ao.open_ro ~path ~end_poff ~dead_header_size
    in
    let+ inventory = Inventory.open_ ~start_idx ~chunk_num ~open_chunk in
    { inventory; root; dead_header_size }

  let open_ro ~root ~appendable_chunk_poff ~dead_header_size ~start_idx
      ~chunk_num =
    let open Result_syntax in
    let open_chunk ~chunk_idx ~is_legacy ~is_appendable =
      let path = chunk_path ~root ~chunk_idx in
      let* { dead_header_size; end_poff } =
        Ao_shim.v ~path ~appendable_chunk_poff ~dead_header_size ~is_legacy
          ~is_appendable
      in
      Ao.open_ro ~path ~end_poff ~dead_header_size
    in
    let+ inventory = Inventory.open_ ~start_idx ~chunk_num ~open_chunk in
    { inventory; root; dead_header_size }

  let start_idx t = Inventory.start_idx t.inventory
  let chunk_num t = Inventory.count t.inventory
  let appendable_ao t = (Inventory.appendable t.inventory).ao
  let appendable_chunk_poff t = appendable_ao t |> Ao.end_poff
  let end_soff t = Inventory.length t.inventory

  (* Read [len] bytes at global suffix offset [off] into [buf], possibly
     spanning several chunks. Reads that land entirely in one chunk go
     straight into [buf]; continuation reads use a temporary buffer that is
     blitted into place. *)
  let read_exn t ~off ~len buf =
    let rec read progress_off suffix_off len_requested =
      let open Int63.Syntax in
      (* Find chunk with [suffix_off] and calculate length we can read. *)
      let chunk, poff = Inventory.find ~off:suffix_off t.inventory in
      let chunk_end_poff = Ao.end_poff chunk.ao in
      let read_end_poff = poff + len_requested in
      let len_read =
        if read_end_poff > chunk_end_poff then chunk_end_poff - poff
        else len_requested
      in
      (* Perform read. If this is the first read, we can use [buf]; otherwise,
         we create a new buffer and transfer after the read. *)
      let len_i = Int63.to_int len_read in
      let is_first_read = progress_off = Int63.zero in
      let ao_buf = if is_first_read then buf else Bytes.create len_i in
      Ao.read_exn chunk.ao ~off:poff ~len:len_i ao_buf;
      if not is_first_read then
        Bytes.blit ao_buf 0 buf (Int63.to_int progress_off) len_i;
      (* Read more if any is [rem]aining. *)
      let rem = len_requested - len_read in
      if rem > Int63.zero then
        read (progress_off + len_read) (suffix_off + len_read) rem
      else ()
    in
    read Int63.zero off (Int63.of_int len)

  let append_exn t s = Ao.append_exn (appendable_ao t) s

  (* Start a new appendable chunk. Refuses to do so when the current
     appendable chunk is still empty (`Multiple_empty_chunks). *)
  let add_chunk ~auto_flush_threshold ~auto_flush_procedure t =
    let open Result_syntax in
    let* () =
      let end_poff = appendable_chunk_poff t in
      if Int63.(equal end_poff zero) then Error `Multiple_empty_chunks
      else Ok ()
    in
    let root = t.root in
    let dead_header_size = t.dead_header_size in
    let open_chunk ~chunk_idx ~is_legacy ~is_appendable =
      let path = chunk_path ~root ~chunk_idx in
      let* { dead_header_size; end_poff } =
        Ao_shim.v ~path ~appendable_chunk_poff:Int63.zero ~dead_header_size
          ~is_legacy ~is_appendable
      in
      match is_appendable with
      | true ->
          Ao.create_rw ~path ~overwrite:true ~auto_flush_threshold
            ~auto_flush_procedure
      | false -> Ao.open_ro ~path ~end_poff ~dead_header_size
    in
    Inventory.add_new_appendable ~open_chunk t.inventory

  let close t = Inventory.close t.inventory
  let empty_buffer t = appendable_ao t |> Ao.empty_buffer
  let flush t = appendable_ao t |> Ao.flush
  let fsync t = appendable_ao t |> Ao.fsync

  let refresh_appendable_chunk_poff t new_poff =
    Ao.refresh_end_poff (appendable_ao t) new_poff

  let readonly t = appendable_ao t |> Ao.readonly
  let auto_flush_threshold t = appendable_ao t |> Ao.auto_flush_threshold

  (* Iterate over every chunk in order, exposing its global start/end suffix
     offsets and whether it is the appendable one. *)
  let fold_chunks f acc t =
    Inventory.fold
      (fun ~acc ~is_appendable ~chunk ->
        let len = Ao.end_poff chunk.ao in
        let start_suffix_off = chunk.suffix_off in
        let end_suffix_off = Int63.Syntax.(start_suffix_off + len) in
        f ~acc ~idx:chunk.idx ~start_suffix_off ~end_suffix_off ~is_appendable)
      acc t.inventory
end
(* * Copyright (c) 2022-2022 Tarides <contact@tarides.com> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. *)
benchmark_result.mli
(** Result of a benchmark run, serializable to JSON. *)

module Metric : sig
  (** A single measured quantity within a benchmark. *)
  type t = {
    name : String.t;  (** Metric identifier, e.g. its display name. *)
    value : [ `Text of string | `Numeric of float ];
        (** Either a free-form textual value or a numeric measurement. *)
    units : String.t;  (** Unit label for [value] (e.g. seconds). *)
    description : String.t;  (** Human-readable explanation of the metric. *)
  }
end

(** A named benchmark together with all of its metrics. *)
type t = { name : String.t; metrics : Metric.t list }

val to_json : t -> string
(** [to_json t] serializes [t] to a JSON string. Exact schema is defined by
    the implementation -- confirm against consumers before relying on it. *)

val create_generic : ?median_time:float -> ?median_throughput:float -> string -> t
(** [create_generic ?median_time ?median_throughput name] builds a benchmark
    result named [name]; the optional medians presumably become metrics when
    provided -- verify against the implementation. *)
describeCacheClusters.mli
(* Interface for the AWS ElastiCache [DescribeCacheClusters] API call,
   instantiating the generic [Aws.Call] signature with this operation's
   request, response, and error types. *)
open Types

(* Request payload for the call. *)
type input = DescribeCacheClustersMessage.t

(* Successful response payload. *)
type output = CacheClusterMessage.t

(* Service-specific error codes. *)
type error = Errors_internal.t

include Aws.Call with type input := input and type output := output and type error := error
js-map.ml
(* NOTE(review): this appears to be an editor/tooling test fixture rather than
   compilable code -- [_] is used as an expression hole and [x] is unbound, so
   the file cannot typecheck as-is. Presumably it exercises merlin/LSP
   behavior (e.g. destruct or completion) at the hole; confirm with the test
   harness before "fixing" it. *)
let projection_files = Deferred.List.map x ~f:(fun p -> _) >>| String.split ~on:'\n'
tx_rollup.ml
(* Testing ------- Component: Transactional rollups Invocation: dune exec tezt/tests/main.exe -- --file tx_rollup.ml Subject: . *) (** To be attached to process whose output needs to be captured by the regression framework. *) let hooks = Tezos_regression.hooks module Rollup = Rollup.Tx_rollup module Parameters = Rollup.Parameters type t = {node : Node.t; client : Client.t; rollup : string} let assert_some res = match res with Some r -> r | None -> assert false let counter = ref 0 let init_with_tx_rollup ?additional_bootstrap_account_count ?(parameters = Parameters.default) ?alias ~protocol () = let* parameter_file = Parameters.parameter_file ~parameters protocol in let* node, client = Client.init_with_protocol ?additional_bootstrap_account_count ~parameter_file `Client ~protocol () in (* We originate a dumb rollup to be able to generate a paths for tx_rollups related RPCs. *) let*! rollup = Client.Tx_rollup.originate ~hooks ~src:Constant.bootstrap1.public_key_hash ?alias client in let* () = Client.bake_for_and_wait client in return {node; client; rollup} let submit_batch : batch:[> `Batch of Hex.t] -> ?batches:([> `Batch of Hex.t] * string) list -> t -> unit Lwt.t = fun ~batch:(`Batch content) ?(batches = []) {rollup; client; node = _} -> let*! () = Client.Tx_rollup.submit_batch ~hooks ~content ~rollup ~src:Constant.bootstrap1.public_key_hash client in let* () = Lwt_list.iter_s (fun (`Batch content, src) -> let*! () = Client.Tx_rollup.submit_batch ~hooks ~content ~rollup ~src client in return ()) batches in Client.bake_for_and_wait client let submit_commitment ?(src = Constant.bootstrap1.public_key_hash) ?predecessor ~level ~roots ~inbox_content {rollup; client; node = _} = let* inbox_merkle_root = match inbox_content with | `Root inbox_merkle_root -> inbox_merkle_root | `Content messages -> let* inbox = Rollup.compute_inbox_from_messages messages client in return inbox.merkle_root in let*! 
() = Client.Tx_rollup.submit_commitment ~hooks ?predecessor ~level ~roots ~inbox_merkle_root ~rollup ~src client in Client.bake_for_and_wait client let submit_return_bond ?(src = Constant.bootstrap1.public_key_hash) {rollup; client; node = _} = let*! () = Client.Tx_rollup.submit_return_bond ~hooks ~rollup ~src client in Client.bake_for_and_wait client let submit_finalize_commitment ?(src = Constant.bootstrap1.public_key_hash) {rollup; client; node = _} = Client.Tx_rollup.submit_finalize_commitment ~hooks ~rollup ~src client let submit_remove_commitment ?(src = Constant.bootstrap1.public_key_hash) {rollup; client; node = _} = Client.Tx_rollup.submit_remove_commitment ~hooks ~rollup ~src client let submit_rejection ?(src = Constant.bootstrap1.public_key_hash) ~level ~message ~position ~proof {rollup; client; node = _} ~context_hash ~withdraw_list_hash = let message = Ezjsonm.value_to_string @@ Rollup.json_of_message message in Client.Tx_rollup.submit_rejection ~hooks ~level ~message ~position ~proof ~rollup ~src ~context_hash ~withdraw_list_hash client (* This module only registers regressions tests. Those regressions tests should be used to ensure there is no regressions with the various RPCs exported by the tx_rollups. *) module Regressions = struct module RPC_tests = struct let rpc_state = Protocol.register_regression_test ~__FILE__ ~title:"RPC (tx_rollup, regression) - state" ~tags:["tx_rollup"; "rpc"] @@ fun protocol -> let* {node = _; client; rollup} = init_with_tx_rollup ~protocol () in let*! _state = Rollup.get_state ~hooks ~rollup client in return () let rpc_inbox = Protocol.register_regression_test ~__FILE__ ~title:"RPC (tx_rollups, regression) - inbox" ~tags:["tx_rollup"; "rpc"; "inbox"] @@ fun protocol -> let* ({rollup; client; node = _} as state) = init_with_tx_rollup ~protocol () in (* The content of the batch does not matter for the regression test. *) let batch = Rollup.make_batch "blob" in let* () = submit_batch ~batch state in let*! 
inbox = Rollup.get_inbox ~hooks ~rollup ~level:0 client in Check.(inbox <> None) (Check.option Rollup.Check.inbox) ~error_msg:"Expected some inbox" ; unit let rpc_inbox_message_hash = Protocol.register_regression_test ~__FILE__ ~title:"RPC (tx_rollups, regression) - inbox message hash" ~tags:["tx_rollup"; "rpc"; "inbox"; "message"] @@ fun protocol -> let* _node, client = Client.init_with_protocol `Client ~protocol () in let message = Rollup.make_batch "blob" in let*! _hash = Rollup.message_hash ~hooks ~message client in unit let rpc_inbox_merkle_tree_hash = Protocol.register_regression_test ~__FILE__ ~title:"RPC (tx_rollups, regression) - inbox merkle tree hash" ~tags:["tx_rollup"; "rpc"; "inbox"; "merkle_tree_hash"] @@ fun protocol -> let* _node, client = Client.init_with_protocol `Client ~protocol () in let messages = List.map Rollup.make_batch ["blob"; "gloubiboulga"] in let* message_hashes = Lwt_list.map_p (fun message -> let*! message_hash = Rollup.message_hash ~hooks ~message client in return message_hash) messages in let*! _hash = Rollup.inbox_merkle_tree_hash ~hooks ~message_hashes client in unit let rpc_inbox_merkle_tree_path = Protocol.register_regression_test ~__FILE__ ~title:"RPC (tx_rollups, regression) - inbox merkle tree path" ~tags:["tx_rollup"; "rpc"; "inbox"; "merkle_tree_path"] @@ fun protocol -> let* _node, client = Client.init_with_protocol `Client ~protocol () in let messages = List.map Rollup.make_batch ["Kouroukoukou"; "roukoukou"; "stach"; "stach"] in let* message_hashes = Lwt_list.map_p (fun message -> let*! message_hash = Rollup.message_hash ~hooks ~message client in return message_hash) messages in let*! _ = Rollup.inbox_merkle_tree_path ~hooks ~message_hashes ~position:3 client in let*! _ = Rollup.inbox_merkle_tree_path ~hooks ~message_hashes ~position:0 client in let*? 
process = Rollup.inbox_merkle_tree_path ~hooks ~message_hashes ~position:4 client in Process.check_error ~msg:(rex "Merkle_list_invalid_positio") process (** This function allows to: - submit a batch to the rollup - submit a commitment for the latest inbox (containing the batch) - retrieve the latest commitment that have been submitted - compare its hash to the latest commitment hash stored in the rollup's state. *) let submit_a_batch_and_commit_helper ({rollup; client; node = _} as state) = (* The content of the batch does not matter for the regression test. *) let batch = Rollup.make_batch "blob" in let* () = submit_batch ~batch state in let* () = Client.bake_for_and_wait client in let inbox_content = `Content [batch] in let* () = submit_commitment ~level:0 ~roots:[Constant.tx_rollup_initial_message_result] ~inbox_content state in let*! commitment = Rollup.get_commitment ~hooks ~block:"head" ~level:0 ~rollup client in let hash = Option.map (fun (c : Rollup.submitted_commitment) -> c.commitment_hash) commitment in let*! state = Rollup.get_state ~hooks ~rollup client in Check.(state.Rollup.commitment_newest_hash = hash) (Check.option Check.string) ~error_msg:"Commitment hash mismatch: %L vs %R" ; unit (** This function tests the RPC that allows to retrieve a commitment. For that, it initializes a network with a tx_rollup, and calls the helper function {submit_a_batch_and_commit_helper} *) let rpc_commitment = Protocol.register_regression_test ~__FILE__ ~title:"RPC (tx_rollups, regression) - commitment" ~tags:["tx_rollup"; "rpc"; "commitment"] @@ fun protocol -> let* state = init_with_tx_rollup ~protocol () in submit_a_batch_and_commit_helper state (** This function tests the commitment removal operation. 
For that, it: - originates a rollup - submits a batch to it - send a commitment for the batch - finalizes the commitment once sufficiently many blocks are baked - removes the commitment once sufficiently many blocks are baked Some checks related to commitment removal are done (in particular, the value of field last_removed_commitments_hashes in the rollup's state). *) let rpc_commitment_remove = Protocol.register_regression_test ~__FILE__ ~title:"RPC (tx_rollups, regression) - commitment remove" ~tags:["tx_rollup"; "rpc"; "commitment"] @@ fun protocol -> let parameters = Parameters.{finality_period = 2; withdraw_period = 2} in let* ({rollup; client; _} as state) = init_with_tx_rollup ~protocol ~parameters () in let src = Constant.bootstrap1.public_key_hash in Log.info "Step 1. Submit a batch and its commitment" ; let* () = submit_a_batch_and_commit_helper state in Log.info "Step 2. Bake before finalizing. But one block is missing" ; let* () = repeat (parameters.finality_period - 1) (fun () -> Client.bake_for_and_wait client) in Log.info "Step 3. Submit finalize commitment that should fail" ; let*? p = Client.Tx_rollup.submit_finalize_commitment ~hooks ~rollup ~src client in let* () = Process.check_error ~msg:(rex "tx_rollup_no_commitment_to_finalize") p in Log.info "Step 4. Bake the missing level to be able to finalize" ; let* () = Client.bake_for_and_wait client in Log.info "Step 5. Submit finalize commitment and bake" ; let*! () = submit_finalize_commitment state in let* () = Client.bake_for_and_wait client in Log.info "Step 6. Bake before removing the commitment. But one block is missing" ; let* () = repeat (parameters.withdraw_period - 1) (fun () -> Client.bake_for_and_wait client) in Log.info "Step 7. Submit remove commitment that should fail" ; let*? p = submit_remove_commitment ~src:Constant.bootstrap2.public_key_hash state in let* () = Process.check_error ~msg:(rex "tx_rollup_remove_commitment_too_early") p in Log.info "Step 8. 
Last_removed_commitments_hashes is None before removing" ; let*! r_state = Rollup.get_state ~hooks ~rollup client in Check.(r_state.Rollup.last_removed_commitment_hashes = None) (Check.option Rollup.Check.commitments_hashes) ~error_msg: "last_removed_commitments_hashes mismatch. Expected %R, but got %L" ; Log.info "Step 9. Bake the missing level to be able to remove commit." ; let* () = Client.bake_for_and_wait client in Log.info "Step 10. Submit remove commitment and bake" ; let*! () = submit_remove_commitment ~src:Constant.bootstrap2.public_key_hash state in let* () = Client.bake_for_and_wait client in Log.info "Step 11. Check the new value of last_removed_commitments_hashes" ; let*! r_state = Rollup.get_state ~hooks ~rollup client in let expected_last = { Rollup.message_hash = Constant.tx_rollup_initial_message_result; Rollup.commitment_hash = assert_some r_state.Rollup.commitment_newest_hash; } in Check.(r_state.Rollup.last_removed_commitment_hashes = Some expected_last) (Check.option Rollup.Check.commitments_hashes) ~error_msg: "Last_removed_commitments_hashes mismatch. Expected %R, but got %L" ; unit let rpc_pending_bonded_commitment = Protocol.register_regression_test ~__FILE__ ~title:"RPC (tx_rollups, regression) - pending bonded commitments" ~tags:["tx_rollup"; "rpc"; "commitment"; "bond"] @@ fun protocol -> let* ({rollup; client; node = _} as state) = init_with_tx_rollup ~protocol () in (* The content of the batch does not matter for the regression test. *) let batch = Rollup.make_batch "blob" in let* () = submit_batch ~batch state in let* () = Client.bake_for_and_wait client in let inbox_content = `Content [batch] in let* () = submit_commitment ~level:0 ~roots:[Constant.tx_rollup_initial_message_result] ~inbox_content state in let*! _commitment = Rollup.get_pending_bonded_commitments ~hooks ~block:"head" ~rollup ~pkh:Constant.bootstrap1.public_key_hash client in (* Use a key which has no commitment. *) let*! 
_commitment = Rollup.get_pending_bonded_commitments ~hooks ~block:"head" ~rollup ~pkh:Constant.bootstrap2.public_key_hash client in unit let batch_encoding = Protocol.register_regression_test ~__FILE__ ~title:"RPC (tx_rollups, regression) - batch encoding" ~tags:["tx_rollup"; "batch"; "encoding"] @@ fun protocol -> let* ({client; rollup = _; node = _} as state) = init_with_tx_rollup ~protocol () in (* Batch with all possible characters. *) let batch = Rollup.make_batch (String.init 256 Char.chr) in let* () = submit_batch ~batch state in let* block = RPC.Client.call client @@ RPC.get_chain_block () in let op = JSON.(block |-> "operations" |=> 3 |=> 0 |-> "contents" |=> 0) in Check.( ((JSON.(op |-> "kind" |> as_string) = "tx_rollup_submit_batch") ~error_msg:"Unexpected operation. Got: %L. Expected: %R.") string) ; let batch_content = JSON.(op |-> "content") in let batch_content_str = JSON.encode batch_content in Regression.capture batch_content_str ; if not (JSON.is_string batch_content) then Test.fail ~__LOC__ "Batch content in JSON should be a string: %s." batch_content_str ; unit let rpc_inbox_future = Protocol.register_test ~__FILE__ ~title:"RPC (tx_rollups, regression) - inbox from the future" ~tags:["tx_rollup"; "rpc"; "inbox"] @@ fun protocol -> let* ({rollup; client; node = _} as state) = init_with_tx_rollup ~protocol () in (* The content of the batch does not matter for the regression test. *) let batch = Rollup.make_batch "blob" in let* () = submit_batch ~batch state in let*! inbox = Rollup.get_inbox ~hooks ~rollup ~level:1 client in Check.(inbox = None) (Check.option Rollup.Check.inbox) ~error_msg:"Expected no inbox" ; unit end module Limits = struct (* The constant comes from the default parameters of the protocol. 
*) type limits = {batch_limit : int; inbox_limit : int} let get_limits client = let* json = RPC.Client.call client @@ RPC.get_chain_block_context_constants () in let batch_limit = JSON.(json |-> "tx_rollup_hard_size_limit_per_message" |> as_int) in let inbox_limit = JSON.(json |-> "tx_rollup_hard_size_limit_per_inbox" |> as_int) in return {batch_limit; inbox_limit} let submit_empty_batch = Protocol.register_regression_test ~__FILE__ ~title:"Submit empty batch" ~tags:["tx_rollup"; "batch"; "client"] @@ fun protocol -> let* state = init_with_tx_rollup ~protocol () in let batch = Rollup.make_batch "" in let* () = submit_batch ~batch state in unit let submit_maximum_size_batch = Protocol.register_regression_test ~__FILE__ ~title:"Submit maximum size batch" ~tags:["tx_rollup"; "batch"; "client"] @@ fun protocol -> let* state = init_with_tx_rollup ~protocol () in let* {batch_limit; _} = get_limits state.client in let batch = Rollup.make_batch (String.make batch_limit 'b') in let* () = submit_batch ~batch state in let (`Batch content) = Rollup.make_batch (String.make (batch_limit + 1) 'c') in let*? 
process = Client.Tx_rollup.submit_batch ~hooks ~content ~rollup:state.rollup ~src:Constant.bootstrap1.public_key_hash state.client in Process.check_error ~msg: (rex "A message submitted to a transaction rollup inbox exceeds limit") process let inbox_maximum_size = Protocol.register_regression_test ~__FILE__ ~title:"Submit maximum size inbox" ~tags:["tx_rollup"; "inbox"; "client"] @@ fun protocol -> (* We need a first client to fetch the protocol constants *) let* old_parameter_file = Parameters.(parameter_file ~parameters:default protocol) in let* _, old_client = Client.init_with_protocol ~parameter_file:old_parameter_file `Client ~protocol () in let* {inbox_limit; batch_limit} = get_limits old_client in (* The test assumes inbox_limit % batch_limit = 0 *) let max_batch_number_per_inbox = inbox_limit / batch_limit in let additional_bootstrap_account_count = max_batch_number_per_inbox - 5 in let* {client; rollup; node = _} = init_with_tx_rollup ~additional_bootstrap_account_count ~protocol () in let (`Batch content) = Rollup.make_batch (String.make batch_limit 'a') in let* () = fold max_batch_number_per_inbox () (fun i () -> let src = Account.Bootstrap.alias (i + 1) in let*! () = Client.Tx_rollup.submit_batch ~hooks ~log_output:false ~log_command:false ~content ~rollup ~src client in unit) in let* () = Client.bake_for_and_wait client in let*! inbox = Rollup.get_inbox ~hooks ~rollup ~level:0 client in let inbox = assert_some inbox in Check.(inbox.cumulated_size = inbox_limit) Check.int ~error_msg:"Unexpected inbox size. Expected %R. 
Got %L" ; unit end module Fail = struct let client_submit_batch_invalid_rollup_address = let open Tezt_tezos in Protocol.register_regression_test ~__FILE__ ~title:"Submit a batch to an invalid rollup address should fail" ~tags:["tx_rollup"; "client"; "fail"; "batch"] @@ fun protocol -> let* parameter_file = Parameters.parameter_file protocol in let* _node, client = Client.init_with_protocol ~parameter_file `Client ~protocol () in let invalid_address = "this is an invalid tx rollup address" in let*? process = Client.Tx_rollup.submit_batch ~hooks ~content:(Hex.of_string "") ~rollup:invalid_address ~src:Constant.bootstrap1.public_key_hash client in let* () = Process.check_error ~exit_code:1 ~msg: (rex (Format.sprintf "Parameter '%s' is an invalid transaction rollup address \ encoded in a base58 string." invalid_address)) process in unit let client_submit_finalize_commitment_no_batch = Protocol.register_regression_test ~__FILE__ ~title:"Submit a finalize commitment operation without batch" ~tags:["tx_rollup"; "client"; "fail"; "finalize"] @@ fun protocol -> let* ({rollup = _; client; node = _} as state) = init_with_tx_rollup ~protocol () in let* () = Client.bake_for_and_wait client in let*? process = submit_finalize_commitment state in Process.check_error ~exit_code:1 ~msg:(rex "tx_rollup_no_commitment_to_finalize") process let client_submit_finalize_commitment_no_commitment = Protocol.register_regression_test ~__FILE__ ~title:"Submit a finalize commitment operation without commitment" ~tags:["tx_rollup"; "client"; "fail"; "finalize"] @@ fun protocol -> let* ({rollup = _; client; node = _} as state) = init_with_tx_rollup ~protocol () in (* The content of the batch does not matter for the regression test. *) let batch = Rollup.make_batch "blob" in let* () = submit_batch ~batch state in let* () = Client.bake_for_and_wait client in let*? 
process = submit_finalize_commitment state in Process.check_error ~exit_code:1 ~msg:(rex "tx_rollup_no_commitment_to_finalize") process let client_submit_finalize_commitment_future = Protocol.register_regression_test ~__FILE__ ~title: "Submit a finalize commitment operation for a commitment in the \ future" ~tags:["tx_rollup"; "client"; "fail"; "finalize"] @@ fun protocol -> let* ({rollup = _; client; node = _} as state) = init_with_tx_rollup ~protocol () in (* The content of the batch does not matter for the regression test. *) let batch = Rollup.make_batch "blob" in let* () = submit_batch ~batch state in let* () = Client.bake_for_and_wait client in let*? process = submit_finalize_commitment state in Process.check_error ~exit_code:1 ~msg:(rex "tx_rollup_no_commitment_to_finalize") process let client_submit_finalize_too_recent_commitment = Protocol.register_regression_test ~__FILE__ ~title:"Try to finalize a too recent commitment" ~tags:["tx_rollup"; "client"; "fail"; "finalize"] @@ fun protocol -> let* ({rollup = _; client; node = _} as state) = init_with_tx_rollup ~protocol () in (* The content of the batch does not matter for the regression test. *) let batch = Rollup.make_batch "blob" in let* () = submit_batch ~batch state in let* () = Client.bake_for_and_wait client in let inbox_content = `Content [batch] in let* () = submit_commitment ~level:0 ~roots:[Constant.tx_rollup_initial_message_result] ~inbox_content state in let* () = Client.bake_for_and_wait client in let*? 
process = submit_finalize_commitment state in Process.check_error ~exit_code:1 ~msg:(rex "tx_rollup_no_commitment_to_finalize") process end let register protocols = RPC_tests.rpc_state protocols ; RPC_tests.rpc_inbox protocols ; RPC_tests.rpc_inbox_message_hash protocols ; RPC_tests.rpc_inbox_merkle_tree_hash protocols ; RPC_tests.rpc_inbox_merkle_tree_path protocols ; RPC_tests.rpc_commitment protocols ; RPC_tests.rpc_commitment_remove protocols ; RPC_tests.rpc_pending_bonded_commitment protocols ; RPC_tests.batch_encoding protocols ; RPC_tests.rpc_inbox_future protocols ; Limits.submit_empty_batch protocols ; Limits.submit_maximum_size_batch protocols ; Limits.inbox_maximum_size protocols ; Fail.client_submit_batch_invalid_rollup_address protocols ; Fail.client_submit_finalize_commitment_no_batch protocols ; Fail.client_submit_finalize_commitment_no_commitment protocols ; Fail.client_submit_finalize_commitment_future protocols ; Fail.client_submit_finalize_too_recent_commitment protocols end (** To be attached to process whose output needs to be captured by the regression framework. *) let hooks = Tezos_regression.hooks let submit_three_batches_and_check_size ~rollup ~tezos_level ~tx_level node client batches = let messages = List.map (fun (batch_message, _, _) -> batch_message) batches in let* () = Lwt_list.iter_s (fun (`Batch content, src, _) -> let*! () = Client.Tx_rollup.submit_batch ~hooks ~content ~rollup ~src client in unit) batches in let* () = Client.bake_for_and_wait client in let* _ = Node.wait_for_level node tezos_level in (* Check the inbox has been created, with the expected cumulated size. *) let* expected_inbox = Rollup.compute_inbox_from_messages ~hooks (messages :> Rollup.message list) client in let*! inbox = Rollup.get_inbox ~hooks ~rollup ~level:tx_level client in Check.( ((inbox = Some expected_inbox) ~error_msg:"Unexpected inbox. Got: %L. 
Expected: %R.") (Check.option Rollup.Check.inbox)) ; return () let test_submit_batches_in_several_blocks = Protocol.register_test ~__FILE__ ~title:"Submit batches in several blocks" ~tags:["tx_rollup"] @@ fun protocol -> let* parameter_file = Parameters.parameter_file protocol in let* node, client = Client.init_with_protocol ~parameter_file `Client ~protocol () in let*! rollup = Client.Tx_rollup.originate ~hooks ~src:Constant.bootstrap1.public_key_hash ~alias:"tx_rollup" client in let* () = Client.bake_for_and_wait client in (* We check the rollup exists by trying to fetch its state. Since it is a regression test, we can detect changes to this default state. *) let*! state = Rollup.get_state ~hooks ~rollup client in let expected_state = Rollup. { finalized_commitments = Empty 0; unfinalized_commitments = Empty 0; uncommitted_inboxes = Empty 0; tezos_head_level = None; commitment_newest_hash = None; burn_per_byte = 0; inbox_ema = 0; last_removed_commitment_hashes = None; } in Check.(state = expected_state) Rollup.Check.state ~error_msg:"Unexpected state. Got: %L. Expected: %R." ; let (`Batch content) = Rollup.make_batch "tezos" in let*! () = Client.Tx_rollup.submit_batch ~hooks ~content ~rollup ~src:Constant.bootstrap1.public_key_hash client in let batch1 = Rollup.make_batch "tezos" in let batch2 = Rollup.make_batch "tx_rollup" in let batch3 = Rollup.make_batch "layer-2" in let*! (`Hash batch1_hash) = Rollup.message_hash ~message:batch1 client in let*! (`Hash batch2_hash) = Rollup.message_hash ~message:batch2 client in let*! 
(`Hash batch3_hash) = Rollup.message_hash ~message:batch3 client in let submission = [ (batch2, Constant.bootstrap2.public_key_hash, batch2_hash); (batch3, Constant.bootstrap3.public_key_hash, batch3_hash); (batch1, Constant.bootstrap1.public_key_hash, batch1_hash); ] in (* Let’s try once and see if everything goes as expected *) let* () = submit_three_batches_and_check_size ~rollup node client submission ~tezos_level:3 ~tx_level:0 in (* Let’s try to see if we can submit three more batches in the next level *) let* () = submit_three_batches_and_check_size ~rollup node client submission ~tezos_level:4 ~tx_level:1 in let* () = submit_three_batches_and_check_size ~rollup node client submission ~tezos_level:5 ~tx_level:2 in let*! _state = Rollup.get_state ~hooks ~rollup client in unit (* This test may make more sense as a unit test; consider removing it. See issue https://gitlab.com/tezos/tezos/-/issues/3546. *) let test_submit_from_originated_source = let open Tezt_tezos in Protocol.register_test ~__FILE__ ~title:"Submit from an originated contract should fail" ~tags:["tx_rollup"; "client"] @@ fun protocol -> let* parameter_file = Parameters.parameter_file protocol in let* _node, client = Client.init_with_protocol ~parameter_file `Client ~protocol () in (* We begin by originating a contract *) let* _alias, originated_contract = Client.originate_contract_at ~amount:Tez.zero ~src:"bootstrap1" ~init:"Some \"initial storage\"" ~burn_cap:Tez.(of_int 3) client ["mini_scenarios"; "str_id"] protocol in let* () = Client.bake_for_and_wait client in (* We originate a tx_rollup using an implicit account *) let*! rollup = Client.Tx_rollup.originate ~src:Constant.bootstrap1.public_key_hash ~alias:"tx_rollup" client in let* () = Client.bake_for_and_wait client in let (`Batch content) = Rollup.Tx_rollup.make_batch "tezos" in (* Finally, we submit a batch to the tx_rollup from an originated contract *) let*? 
process = Client.Tx_rollup.submit_batch ~hooks ~content ~rollup ~src:originated_contract client in let msg = match protocol with | Lima | Mumbai | Alpha -> rex "Erroneous command line argument" in let* () = Process.check_error ~exit_code:1 ~msg process in unit let test_rollup_with_two_commitments = Protocol.register_test ~__FILE__ ~title:"Submit 2 batches, commit, finalize and remove the commitments" ~tags:["tx_rollup"; "commitment"; "batch"] @@ fun protocol -> let parameters = Parameters.{finality_period = 1; withdraw_period = 1} in let* ({rollup; client; node = _} as state) = init_with_tx_rollup ~parameters ~protocol () in let batch = Rollup.make_batch "blob" in let* () = submit_batch ~batch state in let* () = Client.bake_for_and_wait client in let inbox_content = `Content [batch] in let* () = submit_commitment ~level:0 ~roots:[Constant.tx_rollup_initial_message_result] ~inbox_content state in let* () = repeat parameters.finality_period (fun () -> Client.bake_for_and_wait client) in let*! commitment = Rollup.get_commitment ~hooks ~rollup ~level:0 client in let first_commitment_level = (assert_some commitment).commitment.level in Check.(first_commitment_level = 0) Check.int ~error_msg:"First commitment level must be 0" ; (* There is only one commitment, so trying to get level 1 will fail *) let*! commitment = Rollup.get_commitment ~hooks ~rollup ~level:1 client in Check.(commitment = None) (Check.option Rollup.Check.commitment) ~error_msg:"Expected no commitment" ; let*! () = submit_finalize_commitment state in (* A second submission just to ensure it can be included into a block even if it fails. *) let*! () = submit_finalize_commitment ~src:Constant.bootstrap2.public_key_hash state in let* _ = Client.bake_for_and_wait client in let*! 
inbox = Rollup.get_inbox ~hooks ~rollup ~level:0 client in Check.(inbox = None) (Check.option Rollup.Check.inbox) ~error_msg:"Expected no inbox" ; let* json = RPC.Client.call client @@ RPC.get_chain_block_operations () in let manager_operations = JSON.(json |=> 3 |> as_list) in Check.(List.length manager_operations = 2) Check.int ~error_msg:"Two operations manager expected in the last block" ; let first_op = List.nth manager_operations 0 in let second_op = List.nth manager_operations 1 in let get_status op = JSON.( op |-> "contents" |=> 0 |-> "metadata" |-> "operation_result" |-> "status" |> as_string) in let first_op_status = get_status first_op in let second_op_status = get_status second_op in Check.(first_op_status = "applied") Check.string ~error_msg:"The first operation status expected is %R. Got %L" ; Check.(second_op_status = "failed") Check.string ~error_msg:"The second operation status expected is %R. Got %L" ; (* We try to finalize a new commitment but it fails. *) let*? process = submit_finalize_commitment state in let* () = Process.check_error ~exit_code:1 ~msg:(rex "tx_rollup_no_commitment_to_finalize") process in let batch = Rollup.make_batch "blob" in let* () = submit_batch ~batch state in let* () = Client.bake_for_and_wait client in let*! commitment = Rollup.get_commitment ~hooks ~rollup ~level:0 client in let* () = Client.bake_for_and_wait client in let*! () = submit_remove_commitment ~src:Constant.bootstrap2.public_key_hash state in let* () = Client.bake_for_and_wait client in let predecessor = Option.map (fun (c : Rollup.submitted_commitment) -> c.commitment_hash) commitment in let inbox_content = `Content [batch] in let* () = submit_commitment ~level:1 ~roots:[Constant.tx_rollup_initial_message_result] ~inbox_content ?predecessor state in let* () = repeat parameters.finality_period (fun () -> Client.bake_for_and_wait client) in let*! () = submit_finalize_commitment ~src:Constant.bootstrap2.public_key_hash state in let*! 
inbox = Rollup.get_inbox ~hooks ~rollup ~level:1 client in Check.(inbox <> None) (Check.option Rollup.Check.inbox) ~error_msg:"Expected some inbox" ; let* () = Client.bake_for_and_wait client in let*! inbox = Rollup.get_inbox ~hooks ~rollup ~level:0 client in Check.(inbox = None) (Check.option Rollup.Check.inbox) ~error_msg:"Expected no inbox" ; let*! _commitment = Rollup.get_commitment ~hooks ~rollup ~level:1 client in let*! commitment = Rollup.get_commitment ~hooks ~rollup ~level:0 client in Check.(commitment = None) (Check.option Rollup.Check.commitment) ~error_msg:"Expected no commitment" ; let* () = Client.bake_for_and_wait client in let*! () = submit_remove_commitment ~src:Constant.bootstrap2.public_key_hash state in let* () = Client.bake_for_and_wait client in let*! _commitment = Rollup.get_commitment ~hooks ~rollup ~level:0 client in let*! commitment = Rollup.get_commitment ~hooks ~rollup ~level:1 client in Check.(commitment = None) (Check.option Rollup.Check.commitment) ~error_msg:"Expected no commitment" ; let* () = submit_return_bond ~src:Constant.bootstrap1.public_key_hash state in let* json = RPC.raw_bytes ~path:["tx_rollup"] client in let json_object = JSON.as_object json in (* Only the state for the rollup should be allocated. *) Check.(List.length json_object = 1) Check.int ~error_msg:"Expected the rollup storage containing one field. 
Got: %L" ; unit let test_rollup_last_commitment_is_rejected = Protocol.register_regression_test ~__FILE__ ~title:"RPC (tx_rollup, regression) - rejection" ~tags:["tx_rollup"; "rejection"] @@ fun protocol -> let parameters = Parameters.{finality_period = 1; withdraw_period = 1} in let* ({rollup; client; node = _} as state) = init_with_tx_rollup ~parameters ~protocol () in let content = String.make 5_000 'b' in let batch = Rollup.make_batch content in let* () = submit_batch ~batch state in let* () = Client.bake_for_and_wait client in let inbox_content = `Content [batch] in let* () = submit_commitment ~level:0 ~roots:["txmr2DouKqJu5o8KEVGe6gLoiw1J3krjsxhf6C2a1kDNTTr8BdKpf2"] ~inbox_content state in let* () = repeat parameters.finality_period (fun () -> Client.bake_for_and_wait client) in let*! _ = RPC.Tx_rollup.get_state ~rollup client in let*! message_hash = Rollup.message_hash ~message:(Rollup.make_batch content) client in let*! path = Rollup.inbox_merkle_tree_path ~message_hashes:[message_hash] ~position:0 client in let message = Rollup.make_batch content in let message_result_hash = "txmr2DouKqJu5o8KEVGe6gLoiw1J3krjsxhf6C2a1kDNTTr8BdKpf2" in let*! rejected_message_result_path = Rollup.commitment_merkle_tree_path ~message_result_hashes: [`Hash "txmr2DouKqJu5o8KEVGe6gLoiw1J3krjsxhf6C2a1kDNTTr8BdKpf2"] ~position:0 client in let agreed_message_result_path = "[]" in let*! () = submit_rejection ~level:0 ~message ~position:0 ~path:(path |> JSON.encode) ~message_result_hash ~rejected_message_result_path:(rejected_message_result_path |> JSON.encode) ~agreed_message_result_path ~proof:Constant.tx_rollup_proof_initial_state ~context_hash:Constant.tx_rollup_empty_l2_context ~withdraw_list_hash:Constant.tx_rollup_empty_withdraw_list_hash state in let* () = Client.bake_for_and_wait client in let*! 
_ = RPC.Tx_rollup.get_state ~rollup client in let* _ = RPC.Client.call client @@ RPC.get_chain_block () in unit let test_rollup_reject_position_one = Protocol.register_test ~__FILE__ ~title:"reject commitment using position one" ~tags:["tx_rollup"; "rejection"; "batch"] @@ fun protocol -> let parameters = Parameters.{finality_period = 1; withdraw_period = 1} in let* ({rollup; client; node = _} as state) = init_with_tx_rollup ~parameters ~protocol () in let batch = Rollup.make_batch "blob" in let message = batch in let* () = submit_batch ~batch ~batches:[(batch, Constant.bootstrap2.public_key_hash)] state in let* _ = RPC.Client.call client @@ RPC.get_chain_block () in let inbox_content = `Content [batch; batch] in let* () = submit_commitment ~level:0 ~roots: [ Constant.tx_rollup_initial_message_result; "txmr2DouKqJu5o8KEVGe6gLoiw1J3krjsxhf6C2a1kDNTTr8BdKpf2"; ] ~inbox_content state in let* () = repeat parameters.finality_period (fun () -> Client.bake_for_and_wait client) in let*! _ = RPC.Tx_rollup.get_state ~rollup client in let*! message_hash = Rollup.message_hash ~message client in let*! path = Rollup.inbox_merkle_tree_path ~message_hashes:[message_hash; message_hash] ~position:1 client in let*! agreed_message_result_path = Rollup.commitment_merkle_tree_path ~message_result_hashes: [ `Hash Constant.tx_rollup_initial_message_result; `Hash "txmr2DouKqJu5o8KEVGe6gLoiw1J3krjsxhf6C2a1kDNTTr8BdKpf2"; ] ~position:0 client in let*! rejected_message_result_path = Rollup.commitment_merkle_tree_path ~message_result_hashes: [ `Hash Constant.tx_rollup_initial_message_result; `Hash "txmr2DouKqJu5o8KEVGe6gLoiw1J3krjsxhf6C2a1kDNTTr8BdKpf2"; ] ~position:1 client in let*! 
() = submit_rejection ~level:0 ~message ~position:1 ~path:(path |> JSON.encode) ~message_result_hash: "txmr2DouKqJu5o8KEVGe6gLoiw1J3krjsxhf6C2a1kDNTTr8BdKpf2" ~rejected_message_result_path:(rejected_message_result_path |> JSON.encode) ~agreed_message_result_path:(agreed_message_result_path |> JSON.encode) ~proof:Constant.tx_rollup_proof_initial_state ~context_hash:Constant.tx_rollup_empty_l2_context ~withdraw_list_hash:Constant.tx_rollup_empty_withdraw_list_hash state in let* () = Client.bake_for_and_wait client in let*! _ = RPC.Tx_rollup.get_state ~rollup client in let* _ = RPC.Client.call client @@ RPC.get_chain_block () in unit let test_rollup_wrong_rejection = Protocol.register_test ~__FILE__ ~title:"wrong rejection" ~tags:["tx_rollup"; "rejection"; "batch"] @@ fun protocol -> let parameters = Parameters.{finality_period = 1; withdraw_period = 1} in let* ({rollup; client; node = _} as state) = init_with_tx_rollup ~parameters ~protocol () in let batch = Rollup.make_batch "blob" in let* () = submit_batch ~batch state in let* () = Client.bake_for_and_wait client in let inbox_content = `Content [batch] in let* () = submit_commitment ~level:0 ~roots:[Constant.tx_rollup_initial_message_result] ~inbox_content state in let* () = repeat parameters.finality_period (fun () -> Client.bake_for_and_wait client) in let message = batch in let*! message_hash = Rollup.message_hash ~message client in let*! path = Rollup.inbox_merkle_tree_path ~message_hashes:[message_hash] ~position:0 client in let message_result_hash = Constant.tx_rollup_initial_message_result in let*! message_result_path = Rollup.commitment_merkle_tree_path ~message_result_hashes:[`Hash Constant.tx_rollup_initial_message_result] ~position:0 client in (* The proof is invalid, as the submitted batch is stupid, the after hash should be the same as before. *) let*? 
process = submit_rejection ~proof: {|{ "version": 3, "before": { "node": "CoVu7Pqp1Gh3z33mink5T5Q2kAQKtnn3GHxVhyehdKZpQMBxFBGF" } , "after": { "node": "CoUeJrcPBj3T3iJL3PY4jZHnmZa5rRZ87VQPdSBNBcwZRMWJGh9j" } , "state": [] }|} ~level:0 ~message ~position:0 ~path:(JSON.encode path) ~message_result_hash ~rejected_message_result_path:(JSON.encode message_result_path) ~agreed_message_result_path:"[]" ~context_hash:Constant.tx_rollup_empty_l2_context ~withdraw_list_hash:Constant.tx_rollup_empty_withdraw_list_hash state in let* () = Process.check_error ~msg:(rex "tx_rollup_proof_failed_to_reject") process in let* () = Client.bake_for_and_wait client in let*! state = Rollup.get_state ~rollup client in match state.unfinalized_commitments with | Interval (0, _) -> unit | _ -> Test.fail "Wrong rollup state: Expected commitment head at level 0" let test_rollup_wrong_path_for_rejection = Protocol.register_test ~__FILE__ ~title:"wrong message path for rejection" ~tags:["tx_rollup"; "rejection"; "batch"] @@ fun protocol -> let parameters = Parameters.{finality_period = 1; withdraw_period = 1} in let* ({rollup; client; node = _} as state) = init_with_tx_rollup ~parameters ~protocol () in let batch = Rollup.make_batch "blob" in let* () = submit_batch ~batch state in let* () = Client.bake_for_and_wait client in let inbox_content = `Content [batch] in let* () = submit_commitment ~level:0 ~roots:[Constant.tx_rollup_initial_message_result] ~inbox_content state in let* () = repeat parameters.finality_period (fun () -> Client.bake_for_and_wait client) in let message_result_hash = Constant.tx_rollup_initial_message_result in let*! message_result_path = Rollup.commitment_merkle_tree_path ~message_result_hashes:[`Hash Constant.tx_rollup_initial_message_result] ~position:0 client in let*! _ = RPC.Tx_rollup.get_state ~rollup client in let*? 
process = submit_rejection ~proof:Constant.tx_rollup_proof_initial_state ~level:0 ~message:batch ~position:0 ~path:"[]" ~message_result_hash ~rejected_message_result_path:(JSON.encode message_result_path) ~agreed_message_result_path:"[]" ~context_hash:Constant.tx_rollup_empty_l2_context ~withdraw_list_hash:Constant.tx_rollup_empty_withdraw_list_hash state in let* () = Process.check_error ~msg:(rex "tx_rollup_wrong_message_path") process in unit let test_rollup_wrong_rejection_long_path = Protocol.register_test ~__FILE__ ~title:"wrong rejection with long path" ~tags:["tx_rollup"; "rejection"; "batch"] @@ fun protocol -> let parameters = Parameters.{finality_period = 1; withdraw_period = 1} in let* ({rollup = _; client; node = _} as state) = init_with_tx_rollup ~parameters ~protocol () in let batch = Rollup.make_batch "blob" in let* () = submit_batch ~batch state in let* () = Client.bake_for_and_wait client in let inbox_content = `Content [batch] in let* () = submit_commitment ~level:0 ~roots:[Constant.tx_rollup_initial_message_result] ~inbox_content state in let* () = repeat parameters.finality_period (fun () -> Client.bake_for_and_wait client) in let message = batch in let*! message_hash = Rollup.message_hash ~message client in let message_hashes = List.init 1025 (fun _ -> message_hash) in let position = 5 in let*! bad_path = Rollup.inbox_merkle_tree_path ~message_hashes ~position client in let message_hashes = List.init 1024 (fun _ -> message_hash) in let*! good_path = Rollup.inbox_merkle_tree_path ~message_hashes ~position client in let bad_path = JSON.encode bad_path in let good_path = JSON.encode good_path in let message_result_hash = Constant.tx_rollup_initial_message_result in let*! rejected_message_result_path = Rollup.commitment_merkle_tree_path ~message_result_hashes:[`Hash Constant.tx_rollup_initial_message_result] ~position:0 client in let agreed_message_result_path = "[]" in let*? 
process = submit_rejection ~proof:Constant.tx_rollup_proof_initial_state ~level:0 ~message ~position ~path:bad_path ~message_result_hash ~rejected_message_result_path:(rejected_message_result_path |> JSON.encode) ~agreed_message_result_path ~context_hash:Constant.tx_rollup_empty_l2_context ~withdraw_list_hash:Constant.tx_rollup_empty_withdraw_list_hash state in let* () = Process.check_error ~msg:(rex "tx_rollup_wrong_message_path_depth") process in (* We check here the path is valid but the operation is rejected for a different reason. *) let*? process = submit_rejection ~proof:Constant.tx_rollup_proof_initial_state ~level:0 ~message ~position:0 ~path:good_path ~message_result_hash ~rejected_message_result_path:(rejected_message_result_path |> JSON.encode) ~agreed_message_result_path ~context_hash:Constant.tx_rollup_empty_l2_context ~withdraw_list_hash:Constant.tx_rollup_empty_withdraw_list_hash state in Process.check_error ~msg:(rex "tx_rollup_wrong_message_path") process let check_bond_is ~src client ~expected = let* given = RPC.Client.call client @@ RPC.get_chain_block_context_contract_frozen_bonds ~id:src () in Check.(given = Tez.of_mutez_int expected) Tez.typ ~error_msg:"Unexpected frozen bond for tx rollup. Expected %R. Got %L" ; unit let attempt_return_bond ~(expected : [`Ok | `Ko]) ~src state client = let* expected_bond_after_op = Lwt.catch (fun () -> let* () = submit_return_bond ~src state in if expected = `Ko then Test.fail "Return bond expected to fail but succeeded" ; return 0) (fun _exn -> if expected = `Ok then Test.fail "Return bond expected to succeed but failed" ; let* constants = RPC.Client.call client @@ RPC.get_chain_block_context_constants () in return JSON.(constants |-> "tx_rollup_commitment_bond" |> as_int)) in check_bond_is ~src state.client ~expected:expected_bond_after_op (** This function tests some simple situations where bond for commiting in a rollup could be returned or not. 
*) let test_rollup_bond_return = Protocol.register_test ~__FILE__ ~title:"bond return" ~tags:["tx_rollup"; "bond"; "return"] @@ fun protocol -> let parameters = Parameters.{finality_period = 4; withdraw_period = 4} in let* ({rollup; client; node = _} as state) = init_with_tx_rollup ~parameters ~protocol () in let src = Constant.bootstrap2.public_key_hash in let* constants = RPC.Client.call client @@ RPC.get_chain_block_context_constants () in let commit_bond = JSON.(constants |-> "tx_rollup_commitment_bond" |> as_int) in (* No bond deposited at the beginning *) let* () = check_bond_is ~src client ~expected:0 in (* Auxiliary function that sends a batch to a rollup, then the corresponding commitment, and then finalizes the commitment if [finalize] is true. *) let batch_commit_finalize = let current_calls_counter = ref 0 in let step msg = Log.info "call %d) - %s" !current_calls_counter msg in fun ?(finalize = true) ?(remove = true) ~rollup_level () -> incr current_calls_counter ; step "1. Submit batch" ; let batch = Rollup.make_batch "blob" in let* () = submit_batch ~batch state in step "2. Submit commitment" ; let*! s = Rollup.get_state ~rollup client in let* () = submit_commitment ~src ~level:rollup_level ~roots:[Constant.tx_rollup_initial_message_result] ?predecessor:s.Rollup.commitment_newest_hash ~inbox_content:(`Content [batch]) state in step "3. Repeat bake before finalizing commitment" ; let* () = repeat parameters.finality_period (fun () -> Client.bake_for_and_wait client) in step "4. Attempt return bond, which should fail" ; let* () = attempt_return_bond state client ~src ~expected:`Ko in if not finalize then unit else let () = step "5. Submit finalize_commitment and bake" in let*! () = submit_finalize_commitment state in if not remove then unit else ( step "6. 
Repeat bake before finalizing commitment" ; let* () = (* +1 because [submit_finalize_commitment] does not bake *) repeat (parameters.withdraw_period + 1) (fun () -> Client.bake_for_and_wait client) in let () = step "7. Submit remove_commitment and bake" in let*! () = submit_remove_commitment state in let* () = check_bond_is ~src client ~expected:commit_bond in Client.bake_for_and_wait client) in (* 1st scenario: batch; commit; finalize; return bond (OK) *) Log.info "1st scenario: batch; commit; finalize; return bond (OK)" ; let* () = batch_commit_finalize ~rollup_level:0 () in let* () = attempt_return_bond state client ~src ~expected:`Ok in (* 2nd scenario: batch; commit; finalize; batch; commit; finalize; return bond (OK) *) Log.info "2nd scenario: (batch; commit; finalize)x2, return bond (OK)" ; let* () = batch_commit_finalize ~rollup_level:1 () in let* () = batch_commit_finalize ~rollup_level:2 () in let* () = attempt_return_bond state client ~src ~expected:`Ok in (* 3rd scenario: batch; commit; finalize; batch; commit; return bond (KO) *) Log.info "3rd scenario: batch; commit; finalize; batch; commit; return bond (KO)" ; let* () = batch_commit_finalize ~rollup_level:3 () in let* () = batch_commit_finalize ~rollup_level:4 ~finalize:false () in let* () = attempt_return_bond state client ~src ~expected:`Ko in unit (** [test_deposit_withdraw_max_big_tickets] tests to deposit an enormous amount of tickets with maximum payload, then commit the maximum allowed withdrawals, and finally tries to dispatch/transfer all them. This test makes sure we can't hit the size limit of an operation with withdrawals. 
*) let test_deposit_withdraw_max_big_tickets = Protocol.register_test ~__FILE__ ~title:"deposit and withdraw big tickets" ~tags:["tx_rollup"; "withdraw"; "deposit"; "ticket"] @@ fun protocol -> let parameters = Parameters.{finality_period = 4; withdraw_period = 4} in let* ({rollup; client; _} as state) = init_with_tx_rollup ~parameters ~protocol () in let* constants = RPC.Client.call client @@ RPC.get_chain_block_context_constants () in let max_ticket_payload_size = (* [overhead] is the number of bytes introduced by the wrapping of a string in a ticket. This encompasses the ticketer, amount and ty fields. This value has been fetched from the failing test, and acts as a regression value. *) let overhead = 256 in JSON.(constants |-> "tx_rollup_max_ticket_payload_size" |> as_int) - overhead in let max_withdrawals_per_batch = JSON.(constants |-> "tx_rollup_max_withdrawals_per_batch" |> as_int) in let account = Constant.bootstrap1.public_key_hash in (* The following values make sure we can deposit then withdraw the maximum tickets of the maximum size. *) let l2_amount = Int64.max_int in let ticket_contents = Format.sprintf {|"%s"|} (String.make max_ticket_payload_size 't') in let ticket_contents_ty = "string" in (* 1. originate a deposit contract *) let* _alias, deposit_contract = Client.originate_contract_at ~amount:Tez.zero ~src:account ~init:"Unit" ~burn_cap:Tez.one client ["mini_scenarios"; "tx_rollup_deposit"] protocol in let* () = Client.bake_for_and_wait client in (* 2. Deposit tickets to the tx_rollup. *) (* deposit [max_withdrawals_per_batch] times to be able to withdraw [max_int * max_withdrawals_per_batch] in one operation. Last iteration is done outside of [repeat] to retrieve the [ticket_hash]. 
*) let* () = repeat (max_withdrawals_per_batch - 1) (fun () -> let* () = Client.transfer ~amount:Tez.zero ~giver:account ~receiver:deposit_contract ~gas_limit:10000 ~arg: (Format.sprintf {|Pair %s %Ld "%s" "%s"|} ticket_contents l2_amount Constant.tx_rollup_l2_address rollup) ~burn_cap:Tez.one client in Client.bake_for_and_wait client) in let process = Client.spawn_transfer ~amount:Tez.zero ~giver:account ~receiver:deposit_contract ~gas_limit:10000 ~arg: (Format.sprintf {|Pair %s %Ld "%s" "%s"|} ticket_contents l2_amount Constant.tx_rollup_l2_address rollup) ~burn_cap:Tez.one client in let* client_output = Process.check_and_read_stdout process in let* ticket_hash = match client_output =~* rex "Ticket hash: ?(expr\\w{50})" with | None -> Test.fail "Cannot extract ticket hash from client_output: %s" client_output | Some hash -> return hash in let* () = Client.bake_for_and_wait client in (* 3. commit the new inbox with deposit and withdraw at the same time. *) let deposit = Rollup.make_deposit ~sender:account ~destination:Constant.tx_rollup_l2_address ~ticket_hash ~amount:l2_amount in let*! withdraw_list_hash = let withdrawals = Rollup.( List.init max_withdrawals_per_batch (fun _i -> let amount = l2_amount in let claimer = account in {claimer; ticket_hash; amount})) in Rollup.withdraw_list_hash ~withdrawals client in let context_hash = Constant.tx_rollup_empty_l2_context in let*! message_result_hash = Rollup.message_result_hash ~context_hash ~withdraw_list_hash client in let*! tx_rollup_state = Rollup.get_state ~rollup client in let tx_rollup_level = 0 in let* () = submit_commitment ~src:account ~level:tx_rollup_level ~roots:[message_result_hash] ?predecessor:tx_rollup_state.commitment_newest_hash ~inbox_content:(`Content [deposit]) state in let* () = (* bake until the finality period is over to be able to withdraw *) repeat parameters.finality_period (fun () -> Client.bake_for_and_wait client) in (* 4. finalize the commitment *) let*! 
() = submit_finalize_commitment state in let* () = Client.bake_for_and_wait client in (* 5. dispatch tickets from withdrawals to implicit account.*) (* list of ticket dispatch info with contents and ty encoding in json for the client command. *) let* ticket_dispatch_info_data_list = let* ticket_dispatch_info_data = Rollup.get_json_of_ticket_dispatch_info Rollup. { contents = ticket_contents; ty = ticket_contents_ty; ticketer = deposit_contract; amount = l2_amount; claimer = account; } client in let ticket_dispatch_info_data_str = JSON.encode_u ticket_dispatch_info_data in return (List.init max_withdrawals_per_batch (fun _i -> ticket_dispatch_info_data_str)) in let message_position = 0 in let*! message_result_path = Rollup.commitment_merkle_tree_path ~message_result_hashes:[`Hash Constant.tx_rollup_initial_message_result] ~position:0 client in let*! () = Client.Tx_rollup.dispatch_tickets ~burn_cap:Tez.one ~tx_rollup:rollup ~src:account ~level:tx_rollup_level ~message_position ~context_hash ~message_result_path:(message_result_path |> JSON.encode) ~ticket_dispatch_info_data_list client in let* () = Client.bake_for_and_wait client in (* 6. Transfer tickets from implicit account to originated account. *) (* Withdraw contract that has an optional tickets in storage. Drop the previous value. *) let* _alias, withdraw_contract = Client.originate_contract_at ~amount:Tez.zero ~src:account ~init:"None" ~burn_cap:Tez.one client ["mini_scenarios"; "tickets_receive_and_store"] protocol in let* () = Client.bake_for_and_wait client in (* repeat the operation to ensure all tickets can be transfered *) let* () = repeat max_withdrawals_per_batch (fun () -> let*! 
() = Client.transfer_tickets ~qty:l2_amount ~src:account ~destination:withdraw_contract ~entrypoint:"default" ~contents:ticket_contents ~ty:ticket_contents_ty ~ticketer:deposit_contract ~burn_cap:Tez.one client in Client.bake_for_and_wait client) in unit let register ~protocols = Regressions.register protocols ; test_submit_batches_in_several_blocks protocols ; test_submit_from_originated_source protocols ; test_rollup_with_two_commitments protocols ; test_rollup_last_commitment_is_rejected protocols ; test_rollup_reject_position_one protocols ; test_rollup_wrong_rejection protocols ; test_rollup_wrong_path_for_rejection protocols ; test_rollup_wrong_rejection_long_path protocols ; test_rollup_bond_return protocols ; test_deposit_withdraw_max_big_tickets protocols
(*****************************************************************************) (* *) (* Open Source License *) (* Copyright (c) 2022 Nomadic Labs <contact@nomadic-labs.com> *) (* Copyright (c) 2022 Marigold, <contact@marigold.dev> *) (* Copyright (c) 2022 Oxhead Alpha <info@oxhead-alpha.com> *) (* *) (* Permission is hereby granted, free of charge, to any person obtaining a *) (* copy of this software and associated documentation files (the "Software"),*) (* to deal in the Software without restriction, including without limitation *) (* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) (* and/or sell copies of the Software, and to permit persons to whom the *) (* Software is furnished to do so, subject to the following conditions: *) (* *) (* The above copyright notice and this permission notice shall be included *) (* in all copies or substantial portions of the Software. *) (* *) (* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) (* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) (* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) (* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) (* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) (* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) (* DEALINGS IN THE SOFTWARE. *) (* *) (*****************************************************************************)
clz_decompress.ml
(* Mostly from deflate.gz documentation :)

   If somebody cares, we could abstract them over Cohttp and Lwt and have
   an implementation that can work with streamed response bodies and does
   directly output to file. The current dumb implementation is more than
   enough for my limited needs. *)

(** [inflate_string_de str] decompresses [str], interpreted as a raw
    DEFLATE stream, and returns the inflated contents as [Ok string],
    or the [Error _] reported by [De.Higher.uncompress] on bad input. *)
let inflate_string_de str =
  (* Fixed-size input/output staging buffers for the codec. *)
  let i = De.bigstring_create De.io_buffer_size in
  let o = De.bigstring_create De.io_buffer_size in
  (* 15-bit (32 KiB) sliding window, the maximum DEFLATE window size. *)
  let w = De.make_window ~bits:15 in
  (* Accumulates all decompressed output. *)
  let r = Buffer.create 0x1000 in
  (* Read cursor into [str]. *)
  let p = ref 0 in
  let refill buf =
    (* Copy the next slice of [str] into the codec's input buffer.
       Returning 0 (once [str] is exhausted) signals end of input
       to the decompress Higher API. *)
    let len = min (String.length str - !p) De.io_buffer_size in
    Bigstringaf.blit_from_string str ~src_off:!p buf ~dst_off:0 ~len;
    p := !p + len;
    len
  in
  let flush buf len =
    (* Append the bytes the codec just produced to the result buffer. *)
    let str = Bigstringaf.substring buf ~off:0 ~len in
    Buffer.add_string r str
  in
  match De.Higher.uncompress ~w ~refill ~flush i o with
  | Ok () -> Ok (Buffer.contents r)
  | Error _ as err -> err

(** [deflate_string_de str] compresses [str] into a raw DEFLATE stream.
    Unlike the inflate direction, compression cannot fail, so the result
    is returned directly as a string. *)
let deflate_string_de str =
  let i = De.bigstring_create De.io_buffer_size in
  let o = De.bigstring_create De.io_buffer_size in
  (* LZ77 matcher state with the maximal 15-bit window. *)
  let w = De.Lz77.make_window ~bits:15 in
  (* Shared symbol queue between the LZ77 stage and the encoder. *)
  let q = De.Queue.create 0x1000 in
  let r = Buffer.create 0x1000 in
  let p = ref 0 in
  let refill buf =
    (* assert (buf == i); *)
    let len = min (String.length str - !p) De.io_buffer_size in
    Bigstringaf.blit_from_string str ~src_off:!p buf ~dst_off:0 ~len;
    p := !p + len;
    len
  in
  let flush buf len =
    (* assert (buf == o); *)
    let str = Bigstringaf.substring buf ~off:0 ~len in
    Buffer.add_string r str
  in
  De.Higher.compress ~w ~q ~refill ~flush i o;
  Buffer.contents r

(** [inflate_string_gz str] decompresses [str], interpreted as a gzip
    (RFC 1952) stream.  The gzip metadata returned by the library is
    discarded; only the inflated payload is kept. *)
let inflate_string_gz str =
  let i = De.bigstring_create De.io_buffer_size in
  let o = De.bigstring_create De.io_buffer_size in
  let r = Buffer.create 0x1000 in
  let p = ref 0 in
  let refill buf =
    let len = min (String.length str - !p) De.io_buffer_size in
    Bigstringaf.blit_from_string str ~src_off:!p buf ~dst_off:0 ~len;
    p := !p + len;
    len
  in
  let flush buf len =
    let str = Bigstringaf.substring buf ~off:0 ~len in
    Buffer.add_string r str
  in
  Gz.Higher.uncompress ~refill ~flush i o
  |> Result.map (fun _metadata -> Buffer.contents r)

(** [deflate_string_gz ~cfg ?level str] compresses [str] into a gzip
    stream.  [cfg] carries the gzip header configuration (OS, mtime, ...)
    expected by [Gz.Higher.compress]; [level] defaults to 4 (a mid-range
    compression level). *)
let deflate_string_gz ~cfg ?(level = 4) str =
  let i = De.bigstring_create De.io_buffer_size in
  let o = De.bigstring_create De.io_buffer_size in
  let w = De.Lz77.make_window ~bits:15 in
  let q = De.Queue.create 0x1000 in
  let r = Buffer.create 0x1000 in
  let p = ref 0 in
  let refill buf =
    let len = min (String.length str - !p) De.io_buffer_size in
    Bigstringaf.blit_from_string str ~src_off:!p buf ~dst_off:0 ~len;
    p := !p + len;
    len
  in
  let flush buf len =
    let str = Bigstringaf.substring buf ~off:0 ~len in
    Buffer.add_string r str
  in
  Gz.Higher.compress ~level ~w ~q ~refill ~flush () cfg i o;
  Buffer.contents r
(* Mostly from deflate.gz documentation :) If somebody cares, we could abstract them over Cohttp and Lwt and have an implementation that can work with streamed response bodies and does directly output to file. The current dumb implementation is more than enough for my limited needs. *)
clb_intmap.c
#include "clb_intmap.h" /*---------------------------------------------------------------------*/ /* Global Variables */ /*---------------------------------------------------------------------*/ /*---------------------------------------------------------------------*/ /* Forward Declarations */ /*---------------------------------------------------------------------*/ /*---------------------------------------------------------------------*/ /* Internal Functions */ /*---------------------------------------------------------------------*/ /*----------------------------------------------------------------------- // // Function: switch_to_array() // // Return true if representation should switch to array (because of // high density) // // Global Variables: - // // Side Effects : - // /----------------------------------------------------------------------*/ static bool switch_to_array(long old_min, long old_max, long new_key, long entries) { long max_key = MAX(old_max, new_key); long min_key = MIN(old_min, new_key); if((entries * MIN_TREE_DENSITY) > (max_key-min_key)) { return true; } return false; } /*----------------------------------------------------------------------- // // Function: switch_to_tree() // // Return true if representation should switch to tree (because of // low density) // // Global Variables: - // // Side Effects : - // /----------------------------------------------------------------------*/ static bool switch_to_tree(long old_min, long old_max, long new_key, long entries) { long max_key = MAX(old_max, new_key); long min_key = MIN(old_min, new_key); if((entries * MAX_TREE_DENSITY) < (max_key-min_key)) { return true; } return false; } /*----------------------------------------------------------------------- // // Function: add_new_tree_node() // // Add a *new* key node to a IntMap in tree form and return its // address. Assertion fail, if key is not new. Increases element // count! 
// // Global Variables: - // // Side Effects : Changes tree // /----------------------------------------------------------------------*/ static NumTree_p add_new_tree_node(IntMap_p map, long key, void* val) { NumTree_p handle, check; assert(map->type == IMTree); handle = NumTreeCellAlloc(); handle->key = key; handle->val1.p_val = val; check = NumTreeInsert(&(map->values.tree), handle); UNUSED(check); assert(!check); map->entry_no++; return handle; } /*----------------------------------------------------------------------- // // Function: array_to_tree() // // Convert a IntMap in array form to an equivalent one in tree // form. // // Global Variables: - // // Side Effects : Memory operations // /----------------------------------------------------------------------*/ static void array_to_tree(IntMap_p map) { PDRangeArr_p tmp_arr; IntOrP tmp_val; long i; long max_key = map->min_key; long min_key = map->max_key; assert(map->type == IMArray); tmp_arr = map->values.array; map->values.tree = NULL; map->type = IMTree; map->entry_no = 0; for(i=PDRangeArrLowKey(tmp_arr); i<=map->max_key; i++) { tmp_val.p_val = PDRangeArrElementP(tmp_arr, i); if(tmp_val.p_val) { NumTreeStore(&(map->values.tree), i, tmp_val, tmp_val); map->entry_no++; max_key = i; min_key = MIN(min_key, i); } } map->max_key = max_key; map->min_key = MIN(min_key, max_key); PDRangeArrFree(tmp_arr); } /*----------------------------------------------------------------------- // // Function: tree_to_array() // // Convert a IntMap in tree form to an equivalent one in array // form. 
// // Global Variables: - // // Side Effects : Memory operations // /----------------------------------------------------------------------*/ static void tree_to_array(IntMap_p map) { PDRangeArr_p tmp_arr; long max_key = map->min_key; long min_key = map->max_key; PStack_p tree_iterator; NumTree_p handle; assert(map->type == IMTree); map->entry_no = 0; tmp_arr = PDRangeArrAlloc(map->min_key, IM_ARRAY_SIZE); tree_iterator = NumTreeTraverseInit(map->values.tree); while((handle = NumTreeTraverseNext(tree_iterator))) { if(handle->val1.p_val) { PDRangeArrAssignP(tmp_arr, handle->key, handle->val1.p_val); map->entry_no++; max_key = handle->key; min_key = MIN(min_key, handle->key); } } NumTreeTraverseExit(tree_iterator); NumTreeFree(map->values.tree); map->max_key = max_key; map->min_key = MIN(min_key, max_key); map->values.array = tmp_arr; map->type = IMArray; } /*---------------------------------------------------------------------*/ /* Exported Functions */ /*---------------------------------------------------------------------*/ /*----------------------------------------------------------------------- // // Function: IntMapAlloc() // // Allocate an empty int mapper. // // Global Variables: - // // Side Effects : Memory operations // /----------------------------------------------------------------------*/ IntMap_p IntMapAlloc(void) { IntMap_p handle = IntMapCellAlloc(); handle->type = IMEmpty; return handle; } /*----------------------------------------------------------------------- // // Function: IntMapFree() // // Free an int mapper (does _not_ free pointed-to elements). 
// // Global Variables: - // // Side Effects : Memory operations // /----------------------------------------------------------------------*/ void IntMapFree(IntMap_p map) { assert(map); switch(map->type) { case IMEmpty: case IMSingle: break; case IMArray: PDRangeArrFree(map->values.array); break; case IMTree: NumTreeFree(map->values.tree); break; default: assert(false && "Unknown IntMap type."); } IntMapCellFree(map); } /*----------------------------------------------------------------------- // // Function: IntMapGetVal() // // Given a key, return the associated value or NULL, if no suitable // key/value pair exists. // // Global Variables: // // Side Effects : // /----------------------------------------------------------------------*/ void* IntMapGetVal(IntMap_p map, long key) { void* res = NULL; if(!map) { return NULL; } switch(map->type) { case IMEmpty: break; case IMSingle: if(map->max_key == key) { res = map->values.value; } break; case IMArray: if(key <= map->max_key) { res = PDRangeArrElementP(map->values.array, key); } break; case IMTree: if(key <= map->max_key) { NumTree_p entry = NumTreeFind(&(map->values.tree), key); if(entry) { res = entry->val1.p_val; } } break; default: assert(false && "Unknown IntMap type."); } return res; } /*----------------------------------------------------------------------- // // Function: IntMapGetRef() // // Get a reference to the address of the value of a key/value // pair. Note that this always creates the key value pair (with // empty value) if it does not exist yet. // // Global Variables: - // // Side Effects : May reorganize the map. 
// /----------------------------------------------------------------------*/ void** IntMapGetRef(IntMap_p map, long key) { void **res = NULL; void *val; NumTree_p handle; IntOrP tmp; assert(map); /* printf("IntMapGetRef(%p,%ld) type %d, entries=%ld, maxkey=%ld...\n", map, key, map->type,map->entry_no, map->max_key); */ switch(map->type) { case IMEmpty: map->type = IMSingle; map->max_key = key; map->min_key = key; map->values.value = NULL; res = &(map->values.value); map->entry_no = 1; break; case IMSingle: if(key == map->max_key) { res = &(map->values.value); } else if(switch_to_array(key, map->min_key, map->max_key, 2)) { map->type = IMArray; val = map->values.value; map->values.array = PDRangeArrAlloc(MIN(key, map->max_key), IM_ARRAY_SIZE); PDRangeArrAssignP(map->values.array, map->max_key, val); PDRangeArrAssignP(map->values.array, key, NULL); res = &(PDRangeArrElementP(map->values.array, key)); map->entry_no = 2; } else { map->type = IMTree; val = map->values.value; map->values.tree = NULL; tmp.p_val = val; NumTreeStore(&(map->values.tree),map->max_key, tmp, tmp); handle = add_new_tree_node(map, key, NULL); res = &(handle->val1.p_val); map->entry_no = 2; } map->min_key = MIN(map->min_key, key); map->max_key = MAX(key, map->max_key); break; case IMArray: if(((key > map->max_key)||(key<map->min_key)) && switch_to_tree(map->min_key, map->max_key, key, map->entry_no+1)) { array_to_tree(map); res = IntMapGetRef(map, key); } else { res = &(PDRangeArrElementP(map->values.array, key)); if(!(*res)) { map->entry_no++; } } map->min_key=MIN(map->min_key, key); map->max_key=MAX(map->max_key, key); break; case IMTree: handle = NumTreeFind(&(map->values.tree), key); if(handle) { res = &(handle->val1.p_val); } else { if(switch_to_array(map->min_key, map->max_key, key, map->entry_no+1)) { tree_to_array(map); res = IntMapGetRef(map, key); } else { handle = add_new_tree_node(map, key, NULL); map->max_key=MAX(map->max_key, key); map->min_key=MIN(map->min_key, key); res = 
&(handle->val1.p_val); } } break; default: assert(false && "Unknown IntMap type."); } assert(res); return res; } /*----------------------------------------------------------------------- // // Function: IntMapAssign() // // Add key/value pair to map, overriding any previous association. // // Global Variables: - // // Side Effects : Changes map, may trigger reorganization // /----------------------------------------------------------------------*/ void IntMapAssign(IntMap_p map, long key, void* value) { void** ref; assert(map); ref = IntMapGetRef(map, key); *ref = value; } /*----------------------------------------------------------------------- // // Function: IntMapDelKey() // // Delete a key/value association. If there was one, return the // value, otherwise return NULL. // // **Currently, arrays never shrink. This might be worth // **changing (unlikely, though). // // Global Variables: - // // Side Effects : May reorganize map // /----------------------------------------------------------------------*/ void* IntMapDelKey(IntMap_p map, long key) { void* res = NULL; NumTree_p handle; assert(map); switch(map->type) { case IMEmpty: res = NULL; break; case IMSingle: if(key == map->max_key) { res = map->values.value; map->type = IMEmpty; map->entry_no = 0; } break; case IMArray: if(key > map->max_key) { res = NULL; } /* if key == map->max_key optionally do something (shrink * array, recompute map->max_key - likely unnecessary at * least for my current applications */ else if((res = PDRangeArrElementP(map->values.array, key))) { PDRangeArrAssignP(map->values.array, key, NULL); map->entry_no--; if(switch_to_tree(map->min_key, map->max_key, map->max_key, map->entry_no)) { array_to_tree(map); } } break; case IMTree: handle = NumTreeExtractEntry(&(map->values.tree), key); if(handle) { map->entry_no--; res = handle->val1.p_val; if(handle->key == map->max_key) { if(map->values.tree) { map->max_key = NumTreeMaxKey(map->values.tree); } else { map->max_key = map->min_key; } 
if(switch_to_array(map->min_key, map->max_key, map->max_key, map->entry_no)) { tree_to_array(map); } } NumTreeCellFree(handle); } break; default: assert(false && "Unknown IntMap type."); break; } return res; } /*----------------------------------------------------------------------- // // Function: IntMapIterAlloc() // // Allocate an iterator object iterating over key range lower_key to // upper_key (both inclusive) in map. This is only valid as long as // no new key is introduced or old key is deleted. // // Global Variables: - // // Side Effects : Memory operations. // /----------------------------------------------------------------------*/ IntMapIter_p IntMapIterAlloc(IntMap_p map, long lower_key, long upper_key) { IntMapIter_p handle = IntMapIterCellAlloc(); handle->map = map; if(map) { handle->lower_key = MAX(lower_key, map->min_key); handle->upper_key = MIN(upper_key, map->max_key); switch(map->type) { case IMEmpty: break; case IMSingle: handle->admin_data.seen = true; if((map->max_key >= lower_key) && (map->max_key <= upper_key)) { handle->admin_data.seen = false; } break; case IMArray: handle->admin_data.current = lower_key; break; case IMTree: handle->admin_data.tree_iter = NumTreeLimitedTraverseInit(map->values.tree, lower_key); break; default: assert(false && "Unknown IntMap type."); break; } } return handle; } /*----------------------------------------------------------------------- // // Function: IntMapIterFree() // // Free an IntMapIterator. 
// // Global Variables: - // // Side Effects : - // /----------------------------------------------------------------------*/ void IntMapIterFree(IntMapIter_p junk) { assert(junk); if(junk->map) { switch(junk->map->type) { case IMEmpty: case IMSingle: case IMArray: break; case IMTree: PStackFree(junk->admin_data.tree_iter); break; default: assert(false && "Unknown IntMap type."); break; } } IntMapIterCellFree(junk); } /*----------------------------------------------------------------------- // // Function: IntMapDebugPrint() // // Print an intmap datatype as a list of key:value pairs. // // Global Variables: - // // Side Effects : Output, memory operations // /----------------------------------------------------------------------*/ void IntMapDebugPrint(FILE* out, IntMap_p map) { IntMapIter_p iter = IntMapIterAlloc(map,0, LONG_MAX); void* val; long key = 0; fprintf(out, "# ==== IntMapType %d Size = %ld\n", map->type, IntMapStorage(map)); for(val=IntMapIterNext(iter, &key); val; val=IntMapIterNext(iter, &key)) { fprintf(out, "# %5ld : %p\n", key, val); } fprintf(out, "# ==== IntMap End\n"); IntMapIterFree(iter); } /*---------------------------------------------------------------------*/ /* End of File */ /*---------------------------------------------------------------------*/
/*----------------------------------------------------------------------- File : clb_intmap.c Author: Stephan Schulz (schulz@eprover.org) Contents Functions implementing the multi-representation N_0->void* mapping data type. Copyright 2004 by the author. This code is released under the GNU General Public Licence and the GNU Lesser General Public License. See the file COPYING in the main E directory for details.. Run "eprover -h" for contact information. Changes <1> Mon Dec 27 17:34:48 CET 2004 New -----------------------------------------------------------------------*/
number_of_partitions_nmod_vec.c
#include "arith.h" void arith_number_of_partitions_nmod_vec(mp_ptr res, slong len, nmod_t mod) { mp_ptr tmp; mp_limb_t r; slong k, n; r = mod.n - UWORD(1); if (len < 1) return; tmp = _nmod_vec_init(len); _nmod_vec_zero(tmp, len); tmp[0] = UWORD(1); for (n = k = 1; n + 4*k + 2 < len; k += 2) { tmp[n] = r; tmp[n + k] = r; tmp[n + 3*k + 1] = UWORD(1); tmp[n + 4*k + 2] = UWORD(1); n += 6*k + 5; } if (n < len) tmp[n] = r; if (n + k < len) tmp[n + k] = r; if (n + 3*k + 1 < len) tmp[n + 3*k + 1] = WORD(1); _nmod_poly_inv_series(res, tmp, len, len, mod); _nmod_vec_clear(tmp); }
/* Copyright (C) 2011 Fredrik Johansson This file is part of FLINT. FLINT is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License (LGPL) as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. See <https://www.gnu.org/licenses/>. */
test.ml
let () = Test_http.(with_server servers) (fun () -> Irmin_test.Store.run "irmin-http" ~misc:[] ~sleep:Lwt_unix.sleep Test_http.(suites servers))
(* * Copyright (c) 2013-2022 Thomas Gazagnaire <thomas@gazagnaire.org> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. *)
t-sqr.c
#include "fq_nmod_poly.h" #ifdef T #undef T #endif #define T fq_nmod #define CAP_T FQ_NMOD #include "fq_poly_templates/test/t-sqr.c" #undef CAP_T #undef T
/* Copyright (C) 2013 Mike Hansen This file is part of FLINT. FLINT is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License (LGPL) as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. See <https://www.gnu.org/licenses/>. */
text_tag_bind.mli
##ifdef CAMLTK val tag_bind: widget -> textTag -> (modifier list * xEvent) list -> bindAction -> unit ##else val tag_bind : tag: string -> events: event list -> ?extend: bool -> ?breakable: bool -> ?fields: eventField list -> ?action: (eventInfo -> unit) -> text widget -> unit ##endif
or_error.ml
type ('a, 'e) result = ('a, 'e) Result.result = Ok of 'a | Error of 'e type msg = [ `Msg of string ] let ( >>= ) r f = match r with Ok v -> f v | Error _ as e -> e
gen_constants_c.ml
let c_headers = {| #include <srt/srt.h> #ifndef SRT_ENABLE_LOSTBYTESCOUNT #define SRT_ENABLE_LOSTBYTESCOUNT 0 #endif #if (SRT_VERSION_MAJOR <= 1) && (SRT_VERSION_MINOR <= 4) && (SRT_VERSION_PATCH <= 1) #define SRT_EPOLLEMPTY SRT_EUNKNOWN #define SRT_ESCLOSED SRT_EUNKNOWN #define SRT_ESYSOBJ SRT_EUNKNOWN #endif |} let () = let fname = Sys.argv.(1) in let oc = open_out_bin fname in let format = Format.formatter_of_out_channel oc in Format.fprintf format "%s@\n" c_headers; Cstubs.Types.write_c format (module Srt_constants.Def); Format.pp_print_flush format (); close_out oc
ecaml_test.ml
parse.ml
(* This file is free software, part of dolmen. See file "LICENSE" formore information *) (** Interface for Dolmen parsers. *) module type S = sig (** {2 Main interface} *) type token (** The type of token consumed by the parser. *) type statement (** The type of top-level declarations returned by the parser. *) exception Error of int (** Exception raised by the parser when it encounters an error. *) val file : (Lexing.lexbuf -> token) -> Lexing.lexbuf -> statement list (** A function that parses an entire file, i.e until the end-of-file token, and return the list of parsed statements. *) val input : (Lexing.lexbuf -> token) -> Lexing.lexbuf -> statement option (** A function to parse a single statement. Returns [None] if it encounters the end-of-file token. Used for incremental parsing. *) end
t-scalar_mul_ui.c
#include <stdio.h> #include <stdlib.h> #include "fmpz_mpoly.h" int main(void) { int i, j, result; FLINT_TEST_INIT(state); flint_printf("scalar_mul_ui...."); fflush(stdout); /* Check (f*a)*b = f*(a*b) */ for (i = 0; i < 10 * flint_test_multiplier(); i++) { fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t f, g, h, k; ulong a, b, c; slong len, coeff_bits, exp_bits; fmpz_mpoly_ctx_init_rand(ctx, state, 20); fmpz_mpoly_init(f, ctx); fmpz_mpoly_init(g, ctx); fmpz_mpoly_init(h, ctx); fmpz_mpoly_init(k, ctx); len = n_randint(state, 100); exp_bits = n_randint(state, 200) + 1; coeff_bits = n_randint(state, 200); for (j = 0; j < 10; j++) { fmpz_mpoly_randtest_bits(f, state, len, coeff_bits, exp_bits, ctx); fmpz_mpoly_randtest_bits(g, state, len, coeff_bits, exp_bits, ctx); fmpz_mpoly_randtest_bits(h, state, len, coeff_bits, exp_bits, ctx); fmpz_mpoly_randtest_bits(k, state, len, coeff_bits, exp_bits, ctx); a = n_randbits(state, n_randint(state, FLINT_BITS/2) + 1); b = n_randbits(state, n_randint(state, FLINT_BITS/2) + 1); c = a*b; fmpz_mpoly_scalar_mul_ui(g, f, a, ctx); fmpz_mpoly_scalar_mul_ui(h, g, b, ctx); fmpz_mpoly_scalar_mul_ui(k, f, c, ctx); result = fmpz_mpoly_equal(h, k, ctx); if (!result) { printf("FAIL\n"); flint_printf("Check (f*a)*b = f*(a*b)\ni = %wd, j = %wd\n", i,j); fflush(stdout); flint_abort(); } } fmpz_mpoly_clear(f, ctx); fmpz_mpoly_clear(g, ctx); fmpz_mpoly_clear(h, ctx); fmpz_mpoly_clear(k, ctx); } /* Check aliasing */ for (i = 0; i < 10 * flint_test_multiplier(); i++) { fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t f, g, h; ulong c; slong len, coeff_bits, exp_bits; fmpz_mpoly_ctx_init_rand(ctx, state, 20); fmpz_mpoly_init(f, ctx); fmpz_mpoly_init(g, ctx); fmpz_mpoly_init(h, ctx); len = n_randint(state, 100); exp_bits = n_randint(state, 200) + 1; coeff_bits = n_randint(state, 200); for (j = 0; j < 10; j++) { fmpz_mpoly_randtest_bits(f, state, len, coeff_bits, exp_bits, ctx); fmpz_mpoly_randtest_bits(h, state, len, coeff_bits, exp_bits, ctx); c = n_randtest(state); 
fmpz_mpoly_set(g, f, ctx); fmpz_mpoly_scalar_mul_ui(h, f, c, ctx); fmpz_mpoly_scalar_mul_ui(g, g, c, ctx); result = fmpz_mpoly_equal(g, h, ctx); if (!result) { printf("FAIL\n"); flint_printf("Check aliasing\ni = %wd, j = %wd\n", i,j); fflush(stdout); flint_abort(); } } fmpz_mpoly_clear(f, ctx); fmpz_mpoly_clear(g, ctx); fmpz_mpoly_clear(h, ctx); } FLINT_TEST_CLEANUP(state); flint_printf("PASS\n"); return 0; }
/* Copyright (C) 2017 William Hart Copyright (C) 2018 Daniel Schultz This file is part of FLINT. FLINT is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License (LGPL) as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. See <https://www.gnu.org/licenses/>. */
gen_automaton_tables.ml
(* Parsing of S-expression. The parsing is written as an automaton for which we provide different implementations of actions. *) open! Base module Automaton = Parsexp_symbolic_automaton.Automaton module Parse_error_reason = Parsexp_symbolic_automaton.Parse_error_reason module Table = Parsexp_symbolic_automaton.Table (* Sharing of transitions *) module Sharing = struct let create_assign_id () = let cache = Hashtbl.Poly.create () in ( cache , fun x -> if not (Hashtbl.mem cache x) then Hashtbl.add_exn cache ~key:x ~data:(Hashtbl.length cache) ) ;; let share (table : Table.t) = let transitions, assign_transition_id = create_assign_id () in let transitions_eoi, assign_transition_eoi_id = create_assign_id () in Array.iter table.transitions ~f:assign_transition_id; Array.iter table.transitions_eoi ~f:assign_transition_eoi_id; transitions, transitions_eoi ;; end let pr fmt = Printf.ksprintf Stdio.print_endline fmt let ordered_ids tbl = Hashtbl.fold tbl ~init:[] ~f:(fun ~key:x ~data:id acc -> (id, x) :: acc) |> List.sort ~compare:(fun (id1, _) (id2, _) -> compare id1 id2) ;; let print_named_transition (id, tr) = (match (tr : Table.Transition.t Table.Or_parse_error_reason.t) with | Error error -> pr "let tr_%02d_f _state _char _stack =" id; pr " raise _state ~at_eof:false %s" (Parse_error_reason.to_string error) | Ok { action = eps_actions, action; goto; advance } -> let eps_actions = List.filter_map ~f:Automaton.epsilon_action_to_runtime_function eps_actions in let action = Automaton.action_to_runtime_function action in pr "let tr_%02d_f state %schar stack =" id (if Option.is_none action && not ([%compare.equal: Table.Goto_state.t] goto End_block_comment) then "_" else ""); List.iter eps_actions ~f:(pr " let stack = %s state stack in"); (match action with | None -> () | Some s -> pr " let stack = %s state char stack in" s); (match goto with | State n -> pr " set_automaton_state state %d;" n | End_block_comment -> pr " let stack = end_block_comment state char stack in"; pr " 
set_automaton_state state (if block_comment_depth state <> 0 then %d else %d);" (Automaton.State.to_int (Block_comment Normal)) (Automaton.State.to_int Whitespace)); pr " %s state;" (match advance with | Advance -> "advance" | Advance_eol -> "advance_eol"); pr " stack"); pr "let tr_%02d : Automaton_action.Poly.t = { f = tr_%02d_f }" id id ;; let print_named_transition_eoi (id, tr) = (match (tr : Automaton.Epsilon_action.t list Table.Or_parse_error_reason.t) with | Error error -> pr "let tr_eoi_%02d_f state _stack =" id; pr " raise state ~at_eof:true %s" (Parse_error_reason.to_string error) | Ok eps_actions -> pr "let tr_eoi_%02d_f state stack =" id; let eps_actions = List.filter_map eps_actions ~f:Automaton.epsilon_action_to_runtime_function in List.iter eps_actions ~f:(pr " let stack = %s state stack in"); pr " eps_eoi_check state stack"); pr "let tr_eoi_%02d : Automaton_action.Epsilon.Poly.t = { f = tr_eoi_%02d_f }" id id ;; let print_table suffix tbl ids = Array.map tbl ~f:(fun tr -> Printf.sprintf "tr%s_%02d" suffix (Hashtbl.find_exn ids tr)) |> Array.to_list |> String.concat ~sep:";" |> Stdio.printf "let transitions%s = [| %s |]" suffix ;; let print_old_parser_approx_cont_states () = List.map Automaton.State.all ~f:Automaton.State.old_parser_approx_cont_state |> String.concat ~sep:";" |> Stdio.printf "let old_parser_approx_cont_states : Old_parser_cont_state.t array = [| %s |]" ;; let print_code () = let table = Parsexp_symbolic_automaton.table in let named_transitions, named_transitions_eoi = Sharing.share table in List.iter (ordered_ids named_transitions) ~f:print_named_transition; List.iter (ordered_ids named_transitions_eoi) ~f:print_named_transition_eoi; print_table "" table.transitions named_transitions; print_table "_eoi" table.transitions_eoi named_transitions_eoi; print_old_parser_approx_cont_states () ;;
(* Parsing of S-expression. The parsing is written as an automaton for which we provide different implementations of actions. *)
Array.mli
val zip_with : ('a -> 'b -> 'c) -> 'a array -> 'b array -> 'c array val zip : 'a array -> 'b array -> ('a * 'b) array module type EQ_F = functor (E : Interface.EQ) -> sig type t = E.t array val eq : t -> t -> bool end module type ORD_F = functor (O : Interface.ORD) -> sig type t = O.t array val eq : t -> t -> bool val compare : t -> t -> Interface.ordering end module type SHOW_F = functor (S : Interface.SHOW) -> sig type t = S.t array val show : t -> string end module type TRAVERSABLE_F = functor (A : Interface.APPLICATIVE) -> sig type 'a t = 'a array val map : ('a -> 'b) -> 'a t -> 'b t val fold_left : ('a -> 'b -> 'a) -> 'a -> 'b t -> 'a val fold_right : ('b -> 'a -> 'a) -> 'a -> 'b t -> 'a module Fold_Map : functor (M : Interface.MONOID) -> sig val fold_map : ('a -> M.t) -> 'a t -> M.t end module Fold_Map_Any : functor (M : Interface.MONOID_ANY) -> sig val fold_map : ('a -> 'b M.t) -> 'a t -> 'b M.t end module Fold_Map_Plus : functor (P : Interface.PLUS) -> sig val fold_map : ('a -> 'b P.t) -> 'a t -> 'b P.t end type 'a applicative_t = 'a A.t val traverse : ('a -> 'b applicative_t) -> 'a t -> 'b t applicative_t val sequence : 'a applicative_t t -> 'a t applicative_t end module Functor : sig type 'a t = 'a array val map : ('a -> 'b) -> 'a t -> 'b t end module Alt : sig type 'a t = 'a array val map : ('a -> 'b) -> 'a t -> 'b t val alt : 'a t -> 'a t -> 'a t end module Apply : sig type 'a t = 'a array val map : ('a -> 'b) -> 'a t -> 'b t val apply : ('a -> 'b) t -> 'a t -> 'b t end module Applicative : sig type 'a t = 'a array val map : ('a -> 'b) -> 'a t -> 'b t val apply : ('a -> 'b) t -> 'a t -> 'b t val pure : 'a -> 'a t end module Monad : sig type 'a t = 'a array val map : ('a -> 'b) -> 'a t -> 'b t val apply : ('a -> 'b) t -> 'a t -> 'b t val pure : 'a -> 'a t val flat_map : 'a t -> ('a -> 'b t) -> 'b t end module Foldable : sig type 'a t = 'a array val fold_left : ('a -> 'b -> 'a) -> 'a -> 'b t -> 'a val fold_right : ('b -> 'a -> 'a) -> 'a -> 'b t -> 'a 
module Fold_Map : functor (M : Interface.MONOID) -> sig val fold_map : ('a -> M.t) -> 'a t -> M.t end module Fold_Map_Any : functor (M : Interface.MONOID_ANY) -> sig val fold_map : ('a -> 'b M.t) -> 'a t -> 'b M.t end module Fold_Map_Plus : functor (P : Interface.PLUS) -> sig val fold_map : ('a -> 'b P.t) -> 'a t -> 'b P.t end end module Unfoldable : sig type 'a t = 'a array val unfold : ('a -> ('a * 'a) option) -> 'a -> 'a t end module Traversable : TRAVERSABLE_F module Eq : EQ_F module Ord : ORD_F module Show : SHOW_F module Invariant : sig type 'a t = 'a array val imap : ('a -> 'b) -> ('b -> 'a) -> 'a t -> 'b t end module Extend : sig type 'a t = 'a array val map : ('a -> 'b) -> 'a t -> 'b t val extend : ('a t -> 'b) -> 'a t -> 'b t end module Infix : sig val ( <$> ) : ('a -> 'b) -> 'a Monad.t -> 'b Monad.t val ( <@> ) : 'a Monad.t -> ('a -> 'b) -> 'b Monad.t val ( <*> ) : ('a -> 'b) Monad.t -> 'a Monad.t -> 'b Monad.t val ( >>= ) : 'a Monad.t -> ('a -> 'b Monad.t) -> 'b Monad.t val ( =<< ) : ('a -> 'b Monad.t) -> 'a Monad.t -> 'b Monad.t val ( >=> ) : ('a -> 'b Monad.t) -> ('b -> 'c Monad.t) -> 'a -> 'c Monad.t val ( <=< ) : ('a -> 'b Monad.t) -> ('c -> 'a Monad.t) -> 'c -> 'b Monad.t val ( <<= ) : ('a Extend.t -> 'b) -> 'a Extend.t -> 'b Extend.t val ( =>> ) : 'a Extend.t -> ('a Extend.t -> 'b) -> 'b Extend.t end
p-mul_fft.c
#include <stdio.h> #include <stdlib.h> #include "profiler.h" #include "flint.h" #include "fmpz_mat.h" #include "fmpz.h" #include "ulong_extras.h" #include "test_helpers.h" int main(void) { slong i, j, reps; slong dim, sz; fmpz_mat_t A, B, C, D; fmpz_t t; timeit_t timer; slong time1, time2; reps = 1; for (dim = 6; dim > 0; dim--) { flint_printf("****** %wd x %wd *********\n", dim, dim); for (sz = 10000; sz < 100000; sz += 10000) { fmpz_mat_init(A, dim, dim); fmpz_mat_init(B, dim, dim); fmpz_mat_init(C, dim, dim); fmpz_mat_init(D, dim, dim); fmpz_init_set_ui(t, 1); flint_printf("sz = %wd\n", sz); fmpz_mul_2exp(t, t, FLINT_BITS*sz); fmpz_sub_ui(t, t, 1); for (i = 0; i < dim; i++) for (j = 0; j < dim; j++) { fmpz_set(fmpz_mat_entry(A, i, j), t); fmpz_set(fmpz_mat_entry(B, i, j), t); } for (i = 0; i < reps; i++) { timeit_start(timer); fmpz_mat_mul_fft(C, A, B); timeit_stop(timer); time1 = timer->wall; timeit_start(timer); fmpz_mat_mul_classical(D, A, B); timeit_stop(timer); time2 = timer->wall; flint_printf("new: %wd, old: %wd, ratio: %f\n", time1, time2, (double)time2/(double)time1); FLINT_TEST(fmpz_mat_equal(C, D)); } fmpz_clear(t); fmpz_mat_clear(A); fmpz_mat_clear(B); fmpz_mat_clear(C); fmpz_mat_clear(D); } } return 0; }
/* Copyright 2021 Daniel Schultz This file is part of FLINT. FLINT is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License (LGPL) as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. See <https://www.gnu.org/licenses/>. */
pa_ocanren_test.ml
open Pcaml let () = Pcaml.inter_phrases := Some (";\n") let pa1 = PAPR.Implem.pa1 let pr = PAPR.Implem.pr let fmt_string s = Printf.sprintf "<<%s>>" s type instance = { name : string ; code : string ; expect : string } let mktest i = i.name >:: (fun _ -> assert_equal ~msg:"not equal" ~printer:fmt_string i.expect (pr (pa1 i.code)) ) let _ = let ast = () in ()